TylerHilbert committed on
Commit
349fec6
·
1 Parent(s): 8721530

Sorted + removed duplicate TorchTitan and opencv

Browse files
Files changed (1) hide show
  1. PyTorchConference2025_GithubRepos.json +869 -881
PyTorchConference2025_GithubRepos.json CHANGED
@@ -1,883 +1,871 @@
1
  [
2
- {
3
- "repo_name": "pytorch",
4
- "repo_link": "https://github.com/pytorch/pytorch",
5
- "category": "machine learning framework",
6
- "github_about_section": "Tensors and Dynamic neural networks in Python with strong GPU acceleration",
7
- "homepage_link": "https://pytorch.org",
8
- "github_topic_closest_fit": "machine-learning"
9
- },
10
- {
11
- "repo_name": "triton",
12
- "repo_link": "https://github.com/triton-lang/triton",
13
- "category": "parallel computing dsl",
14
- "github_about_section": "Development repository for the Triton language and compiler",
15
- "homepage_link": "https://triton-lang.org/",
16
- "github_topic_closest_fit": "parallel-programming"
17
- },
18
- {
19
- "repo_name": "cutlass",
20
- "repo_link": "https://github.com/NVIDIA/cutlass",
21
- "category": "parallel computing",
22
- "github_about_section": "CUDA Templates and Python DSLs for High-Performance Linear Algebra",
23
- "homepage_link": "https://docs.nvidia.com/cutlass/index.html",
24
- "github_topic_closest_fit": "parallel-programming"
25
- },
26
- {
27
- "repo_name": "tilelang",
28
- "repo_link": "https://github.com/tile-ai/tilelang",
29
- "category": "parallel computing dsl",
30
- "github_about_section": "Domain-specific language designed to streamline the development of high-performance GPU/CPU/Accelerators kernels",
31
- "homepage_link": "https://tilelang.com",
32
- "github_topic_closest_fit": "parallel-programming"
33
- },
34
- {
35
- "repo_name": "ThunderKittens",
36
- "repo_link": "https://github.com/HazyResearch/ThunderKittens",
37
- "category": "parallel computing",
38
- "github_about_section": "Tile primitives for speedy kernels",
39
- "homepage_link": "https://hazyresearch.stanford.edu/blog/2024-10-29-tk2",
40
- "github_topic_closest_fit": "parallel-programming"
41
- },
42
- {
43
- "repo_name": "helion",
44
- "repo_link": "https://github.com/pytorch/helion",
45
- "category": "parallel computing dsl",
46
- "github_about_section": "A Python-embedded DSL that makes it easy to write fast, scalable ML kernels with minimal boilerplate.",
47
- "homepage_link": "https://helionlang.com",
48
- "github_topic_closest_fit": "parallel-programming"
49
- },
50
- {
51
- "repo_name": "TileIR",
52
- "repo_link": "https://github.com/microsoft/TileIR",
53
- "category": "parallel computing dsl",
54
- "github_about_section": "TileIR (tile-ir) is a concise domain-specific IR designed to streamline the development of high-performance GPU/CPU kernels (e.g., GEMM, Dequant GEMM, FlashAttention, LinearAttention). By employing a Pythonic syntax with an underlying compiler infrastructure on top of TVM, TileIR allows developers to focus on productivity without sacrificing the low-level optimizations necessary for state-of-the-art performance.",
55
- "github_topic_closest_fit": "parallel-programming"
56
- },
57
- {
58
- "repo_name": "BitBLAS",
59
- "repo_link": "https://github.com/microsoft/BitBLAS",
60
- "category": "BLAS",
61
- "github_about_section": "BitBLAS is a library to support mixed-precision matrix multiplications, especially for quantized LLM deployment."
62
- },
63
- {
64
- "repo_name": "tensorflow",
65
- "repo_link": "https://github.com/tensorflow/tensorflow",
66
- "category": "machine learning framework",
67
- "github_about_section": "An Open Source Machine Learning Framework for Everyone",
68
- "homepage_link": "https://tensorflow.org",
69
- "github_topic_closest_fit": "machine-learning"
70
- },
71
- {
72
- "repo_name": "vllm",
73
- "repo_link": "https://github.com/vllm-project/vllm",
74
- "category": "inference engine",
75
- "github_about_section": "A high-throughput and memory-efficient inference and serving engine for LLMs",
76
- "homepage_link": "https://docs.vllm.ai",
77
- "github_topic_closest_fit": "inference"
78
- },
79
- {
80
- "repo_name": "ollama",
81
- "repo_link": "https://github.com/ollama/ollama",
82
- "category": "inference engine",
83
- "github_about_section": "Get up and running with OpenAI gpt-oss, DeepSeek-R1, Gemma 3 and other models.",
84
- "homepage_link": "https://ollama.com",
85
- "github_topic_closest_fit": "inference"
86
- },
87
- {
88
- "repo_name": "llama.cpp",
89
- "repo_link": "https://github.com/ggml-org/llama.cpp",
90
- "category": "inference engine",
91
- "github_about_section": "LLM inference in C/C++",
92
- "homepage_link": "https://ggml.ai",
93
- "github_topic_closest_fit": "inference"
94
- },
95
- {
96
- "repo_name": "sglang",
97
- "repo_link": "https://github.com/sgl-project/sglang",
98
- "category": "inference engine",
99
- "github_about_section": "SGLang is a fast serving framework for large language models and vision language models.",
100
- "homepage_link": "https://docs.sglang.ai",
101
- "github_topic_closest_fit": "inference"
102
- },
103
- {
104
- "repo_name": "TensorRT",
105
- "repo_link": "https://github.com/NVIDIA/TensorRT",
106
- "category": "inference engine",
107
- "github_about_section": "NVIDIA TensorRT is an SDK for high-performance deep learning inference on NVIDIA GPUs. This repository contains the open source components of TensorRT.",
108
- "homepage_link": "https://developer.nvidia.com/tensorrt",
109
- "github_topic_closest_fit": "inference"
110
- },
111
- {
112
- "repo_name": "onnx",
113
- "repo_link": "https://github.com/onnx/onnx",
114
- "category": "machine learning interoperability",
115
- "github_about_section": "Open standard for machine learning interoperability",
116
- "homepage_link": "https://onnx.ai",
117
- "github_topic_closest_fit": "onnx"
118
- },
119
- {
120
- "repo_name": "executorch",
121
- "repo_link": "https://github.com/pytorch/executorch",
122
- "category": "model compiler",
123
- "github_about_section": "On-device AI across mobile, embedded and edge for PyTorch",
124
- "homepage_link": "https://executorch.ai",
125
- "github_topic_closest_fit": "compiler"
126
- },
127
- {
128
- "repo_name": "ray",
129
- "repo_link": "https://github.com/ray-project/ray",
130
- "category": "ai compute engine",
131
- "github_about_section": "Ray is an AI compute engine. Ray consists of a core distributed runtime and a set of AI Libraries for accelerating ML workloads.",
132
- "homepage_link": "https://ray.io",
133
- "github_topic_closest_fit": "machine-learning"
134
- },
135
- {
136
- "repo_name": "jax",
137
- "repo_link": "https://github.com/jax-ml/jax",
138
- "category": "scientific computing",
139
- "github_about_section": "Composable transformations of Python+NumPy programs: differentiate, vectorize, JIT to GPU/TPU, and more",
140
- "homepage_link": "https://docs.jax.dev",
141
- "github_topic_closest_fit": "scientific-computing"
142
- },
143
- {
144
- "repo_name": "numpy",
145
- "repo_link": "https://github.com/numpy/numpy",
146
- "category": "scientific computing",
147
- "github_about_section": "The fundamental package for scientific computing with Python.",
148
- "homepage_link": "https://numpy.org",
149
- "github_topic_closest_fit": "scientific-computing"
150
- },
151
- {
152
- "repo_name": "scipy",
153
- "repo_link": "https://github.com/scipy/scipy",
154
- "category": "scientific computing",
155
- "github_about_section": "SciPy library main repository",
156
- "homepage_link": "https://scipy.org",
157
- "github_topic_closest_fit": "scientific-computing"
158
- },
159
- {
160
- "repo_name": "numba",
161
- "repo_link": "https://github.com/numba/numba",
162
- "category": "compiler",
163
- "github_about_section": "NumPy aware dynamic Python compiler using LLVM",
164
- "homepage_link": "https://numba.pydata.org",
165
- "github_topic_closest_fit": "compiler"
166
- },
167
- {
168
- "repo_name": "llvm-project",
169
- "repo_link": "https://github.com/llvm/llvm-project",
170
- "category": "compiler",
171
- "github_about_section": "The LLVM Project is a collection of modular and reusable compiler and toolchain technologies.",
172
- "homepage_link": "http://llvm.org",
173
- "github_topic_closest_fit": "compiler"
174
- },
175
- {
176
- "repo_link": "https://github.com/pytorch/ao",
177
- "repo_name": "ao",
178
- "github_about_section": "PyTorch native quantization and sparsity for training and inference",
179
- "homepage_link": "https://pytorch.org/ao/stable/index.html",
180
- "github_topic_closest_fit": "quantization"
181
- },
182
- {
183
- "repo_link": "https://github.com/AMD-AGI/GEAK-agent",
184
- "repo_name": "GEAK-agent",
185
- "github_about_section": "It is an LLM-based AI agent, which can write correct and efficient gpu kernels automatically."
186
- },
187
- {
188
- "repo_link": "https://github.com/block/goose",
189
- "repo_name": "goose",
190
- "github_about_section": "an open source, extensible AI agent that goes beyond code suggestions - install, execute, edit, and test with any LLM",
191
- "homepage_link": "https://block.github.io/goose/",
192
- "github_topic_closest_fit": "mcp",
193
- "category": "agent"
194
- },
195
- {
196
- "repo_link": "https://github.com/codelion/openevolve",
197
- "repo_name": "openevolve",
198
- "github_about_section": "Open-source implementation of AlphaEvolve",
199
- "github_topic_closest_fit": "genetic-algorithm"
200
- },
201
- {
202
- "repo_link": "https://github.com/volcengine/verl",
203
- "repo_name": "verl",
204
- "github_about_section": "verl: Volcano Engine Reinforcement Learning for LLMs",
205
- "homepage_link": "https://verl.readthedocs.io/en/latest/index.html"
206
- },
207
- {
208
- "repo_link": "https://github.com/huggingface/peft",
209
- "repo_name": "peft",
210
- "github_about_section": "PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.",
211
- "homepage_link": "https://huggingface.co/docs/peft",
212
- "github_topic_closest_fit": "lora"
213
- },
214
- {
215
- "repo_link": "https://github.com/Dao-AILab/quack",
216
- "repo_name": "quack",
217
- "github_about_section": "A Quirky Assortment of CuTe Kernels",
218
- "category": "kernels"
219
- },
220
- {
221
- "repo_name": "intelliperf",
222
- "repo_link": "https://github.com/AMDResearch/intelliperf",
223
- "category": "performance",
224
- "homepage_link": "https://arxiv.org/html/2508.20258v1",
225
- "github_about_section": "Automated bottleneck detection and solution orchestration",
226
- "github_topic_closest_fit": "profiling"
227
- },
228
- {
229
- "repo_name": "omnitrace",
230
- "repo_link": "https://github.com/ROCm/omnitrace",
231
- "category": "performance testing",
232
- "github_about_section": "Omnitrace: Application Profiling, Tracing, and Analysis",
233
- "homepage_link": "https://rocm.docs.amd.com/projects/omnitrace/en/docs-6.2.4",
234
- "github_topic_closest_fit": "profiling"
235
- },
236
- {
237
- "repo_name": "hatchet",
238
- "repo_link": "https://github.com/LLNL/hatchet",
239
- "category": "performance",
240
- "github_about_section": "Graph-indexed Pandas DataFrames for analyzing hierarchical performance data",
241
- "homepage_link": "https://llnl-hatchet.readthedocs.io",
242
- "github_topic_closest_fit": "profiling"
243
- },
244
- {
245
- "repo_name": "cupti",
246
- "repo_link": "https://github.com/cwpearson/cupti",
247
- "category": "performance",
248
- "github_about_section": "Profile how CUDA applications create and modify data in memory.",
249
- "github_topic_closest_fit": "profiling"
250
- },
251
- {
252
- "repo_link": "https://github.com/letta-ai/letta",
253
- "repo_name": "letta",
254
- "github_about_section": "Letta is the platform for building stateful agents: open AI with advanced memory that can learn and self-improve over time.",
255
- "homepage_link": "https://docs.letta.com/",
256
- "github_topic_closest_fit": "ai-agents"
257
- },
258
- {
259
- "repo_link": "https://github.com/lastmile-ai/mcp-agent",
260
- "repo_name": "mcp-agent",
261
- "github_about_section": "Build effective agents using Model Context Protocol and simple workflow patterns",
262
- "github_topic_closest_fit": "ai-agents"
263
- },
264
- {
265
- "repo_link": "https://github.com/modular/modular",
266
- "repo_name": "modular",
267
- "github_about_section": "The Modular Platform (includes MAX & Mojo)",
268
- "homepage_link": "https://docs.modular.com/",
269
- "github_topic_closest_fit": "mojo"
270
- },
271
- {
272
- "repo_link": "https://github.com/ScalingIntelligence/KernelBench",
273
- "repo_name": "KernelBench",
274
- "github_about_section": "KernelBench: Can LLMs Write GPU Kernels? - Benchmark with Torch -> CUDA problems",
275
- "homepage_link": "https://scalingintelligence.stanford.edu/blogs/kernelbench/",
276
- "github_topic_closest_fit": "benchmark",
277
- "category": "benchmark"
278
- },
279
- {
280
- "repo_link": "https://github.com/thunlp/TritonBench",
281
- "repo_name": "TritonBench",
282
- "github_about_section": "TritonBench: Benchmarking Large Language Model Capabilities for Generating Triton Operators",
283
- "category": "benchmark"
284
- },
285
- {
286
- "repo_link": "https://github.com/flashinfer-ai/flashinfer-bench",
287
- "repo_name": "flashinfer-bench",
288
- "github_about_section": "Building the Virtuous Cycle for AI-driven LLM Systems",
289
- "homepage_link": "https://bench.flashinfer.ai",
290
- "category": "benchmark"
291
- },
292
- {
293
- "repo_link": "https://github.com/laude-institute/terminal-bench",
294
- "repo_name": "terminal-bench",
295
- "github_about_section": "A benchmark for LLMs on complicated tasks in the terminal",
296
- "homepage_link": "https://www.tbench.ai",
297
- "category": "benchmark"
298
- },
299
- {
300
- "repo_link": "https://github.com/SWE-bench/SWE-bench",
301
- "repo_name": "SWE-bench",
302
- "github_about_section": "SWE-bench: Can Language Models Resolve Real-world Github Issues?",
303
- "homepage_link": "https://www.swebench.com",
304
- "github_topic_closest_fit": "benchmark",
305
- "category": "benchmark"
306
- },
307
- {
308
- "repo_link": "https://github.com/gpu-mode/reference-kernels",
309
- "repo_name": "reference-kernels",
310
- "github_about_section": "Official Problem Sets / Reference Kernels for the GPU MODE Leaderboard!",
311
- "homepage_link": "https://gpumode.com",
312
- "github_topic_closest_fit": "gpu",
313
- "category": "kernels"
314
- },
315
- {
316
- "repo_link": "https://github.com/linkedin/Liger-Kernel",
317
- "repo_name": "Liger-Kernel",
318
- "github_about_section": "Efficient Triton Kernels for LLM Training",
319
- "homepage_link": "https://openreview.net/pdf?id=36SjAIT42G",
320
- "github_topic_closest_fit": "triton",
321
- "category": "kernels"
322
- },
323
- {
324
- "repo_link": "https://github.com/huggingface/kernels",
325
- "repo_name": "kernels",
326
- "github_about_section": "Load compute kernels from the Hub",
327
- "category": "kernels"
328
- },
329
- {
330
- "repo_link": "https://github.com/huggingface/kernels-community",
331
- "repo_name": "kernels-community",
332
- "github_about_section": "Kernel sources for https://huggingface.co/kernels-community",
333
- "category": "kernels"
334
- },
335
- {
336
- "repo_link": "https://github.com/unslothai/unsloth",
337
- "repo_name": "unsloth",
338
- "github_about_section": "Fine-tuning & Reinforcement Learning for LLMs. Train OpenAI gpt-oss, DeepSeek-R1, Qwen3, Gemma 3, TTS 2x faster with 70% less VRAM.",
339
- "homepage_link": "https://docs.unsloth.ai"
340
- },
341
- {
342
- "repo_name": "transformers",
343
- "repo_link": "https://github.com/huggingface/transformers",
344
- "github_about_section": "Transformers: the model-definition framework for state-of-the-art machine learning models in text, vision, audio, and multimodal models, for both inference and training.",
345
- "homepage_link": "https://huggingface.co/transformers"
346
- },
347
- {
348
- "repo_name": "accelerate",
349
- "repo_link": "https://github.com/huggingface/accelerate",
350
- "github_about_section": "A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support.",
351
- "homepage_link": "https://huggingface.co/docs/accelerate"
352
- },
353
- {
354
- "repo_name": "trl",
355
- "repo_link": "https://github.com/huggingface/trl",
356
- "github_about_section": "Train transformer language models with reinforcement learning.",
357
- "homepage_link": "http://hf.co/docs/trl"
358
- },
359
- {
360
- "repo_link": "https://github.com/jupyterlab/jupyterlab",
361
- "repo_name": "jupyterlab",
362
- "github_about_section": "JupyterLab computational environment.",
363
- "homepage_link": "https://jupyterlab.readthedocs.io/",
364
- "github_topic_closest_fit": "jupyter",
365
- "category": "ui"
366
- },
367
- {
368
- "repo_link": "https://github.com/ROCm/rocm-systems",
369
- "repo_name": "rocm-systems",
370
- "github_about_section": "super repo for rocm systems projects"
371
- },
372
- {
373
- "repo_link": "https://github.com/ROCm/hip",
374
- "repo_name": "hip",
375
- "github_about_section": "HIP: C++ Heterogeneous-Compute Interface for Portability",
376
- "homepage_link": "https://rocmdocs.amd.com/projects/HIP/",
377
- "github_topic_closest_fit": "hip"
378
- },
379
- {
380
- "repo_link": "https://github.com/ROCm/ROCm",
381
- "repo_name": "ROCm",
382
- "github_about_section": "AMD ROCm Software - GitHub Home",
383
- "homepage_link": "https://rocm.docs.amd.com",
384
- "github_topic_closest_fit": "documentation"
385
- },
386
- {
387
- "repo_name": "ZLUDA",
388
- "repo_link": "https://github.com/vosen/ZLUDA",
389
- "category": "CUDA / OpenCL",
390
- "github_about_section": "CUDA on non-NVIDIA GPUs",
391
- "homepage_link": "https://vosen.github.io/ZLUDA/",
392
- "github_topic_closest_fit": "cuda"
393
- },
394
- {
395
- "repo_name": "CU2CL",
396
- "repo_link": "https://github.com/vtsynergy/CU2CL",
397
- "category": "CUDA / OpenCL",
398
- "github_about_section": "A prototype CUDA-to-OpenCL source-to-source translator, built on the Clang compiler framework",
399
- "homepage_link": "http://chrec.cs.vt.edu/cu2cl",
400
- "github_topic_closest_fit": "opencl"
401
- },
402
- {
403
- "repo_name": "pocl",
404
- "repo_link": "https://github.com/pocl/pocl",
405
- "category": "CUDA / OpenCL",
406
- "github_about_section": "pocl - Portable Computing Language",
407
- "homepage_link": "https://portablecl.org",
408
- "github_topic_closest_fit": "opencl"
409
- },
410
- {
411
- "repo_link": "https://github.com/toyaix/triton-runner",
412
- "repo_name": "triton-runner",
413
- "github_about_section": "Multi-Level Triton Runner supporting Python, IR, PTX, and cubin.",
414
- "homepage_link": "https://triton-runner.org",
415
- "github_topic_closest_fit": "triton"
416
- },
417
- {
418
- "repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
419
- "repo_name": "Triton-distributed",
420
- "github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
421
- "homepage_link": "https://triton-distributed.readthedocs.io/en/latest"
422
- },
423
- {
424
- "repo_link": "https://github.com/meta-pytorch/tritonparse",
425
- "repo_name": "tritonparse",
426
- "github_about_section": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels",
427
- "homepage_link": "https://meta-pytorch.org/tritonparse/",
428
- "github_topic_closest_fit": "triton"
429
- },
430
- {
431
- "repo_link": "https://github.com/Lightning-AI/lightning-thunder",
432
- "repo_name": "lightning-thunder",
433
- "github_about_section": "PyTorch compiler that accelerates training and inference. Get built-in optimizations for performance, memory, parallelism, and easily write your own."
434
- },
435
- {
436
- "repo_link": "https://github.com/pytorch/torchdynamo",
437
- "repo_name": "torchdynamo",
438
- "github_about_section": "A Python-level JIT compiler designed to make unmodified PyTorch programs faster."
439
- },
440
- {
441
- "repo_link": "https://github.com/NVIDIA/nccl",
442
- "repo_name": "nccl",
443
- "github_about_section": "Optimized primitives for collective multi-GPU communication",
444
- "homepage_link": "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html"
445
- },
446
- {
447
- "repo_link": "https://github.com/ai-dynamo/nixl",
448
- "repo_name": "nixl",
449
- "github_about_section": "NVIDIA Inference Xfer Library (NIXL)"
450
- },
451
- {
452
- "repo_link": "https://github.com/guandeh17/Self-Forcing",
453
- "repo_name": "Self-Forcing",
454
- "github_about_section": "Official codebase for \"Self Forcing: Bridging Training and Inference in Autoregressive Video Diffusion\" (NeurIPS 2025 Spotlight)"
455
-
456
- },
457
- {
458
- "repo_link": "https://github.com/cumulo-autumn/StreamDiffusion",
459
- "repo_name": "StreamDiffusion",
460
- "github_about_section": "StreamDiffusion: A Pipeline-Level Solution for Real-Time Interactive Generation"
461
- },
462
- {
463
- "repo_link": "https://github.com/comfyanonymous/ComfyUI",
464
- "repo_name": "ComfyUI",
465
- "github_about_section": "The most powerful and modular diffusion model GUI, api and backend with a graph/nodes interface.",
466
- "homepage_link": "https://www.comfy.org/",
467
- "github_topic_closest_fit": "stable-diffusion"
468
- },
469
- {
470
- "repo_link": "https://github.com/Jeff-LiangF/streamv2v",
471
- "repo_name": "streamv2v",
472
- "github_about_section": "Official Pytorch implementation of StreamV2V.",
473
- "homepage_link": "https://jeff-liangf.github.io/projects/streamv2v/"
474
- },
475
- {
476
- "repo_link": "https://github.com/deepspeedai/DeepSpeed",
477
- "repo_name": "DeepSpeed",
478
- "github_about_section": "DeepSpeed is a deep learning optimization library that makes distributed training and inference easy, efficient, and effective.",
479
- "homepage_link": "https://www.deepspeed.ai/",
480
- "github_topic_closest_fit": "gpu"
481
- },
482
- {
483
- "repo_link": "https://github.com/triton-inference-server/server",
484
- "repo_name": "server",
485
- "github_about_section": "The Triton Inference Server provides an optimized cloud and edge inferencing solution.",
486
- "homepage_link": "https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html",
487
- "github_topic_closest_fit": "inference"
488
- },
489
- {
490
- "repo_link": "https://github.com/elastic/elasticsearch",
491
- "repo_name": "elasticsearch",
492
- "github_about_section": "Free and Open Source, Distributed, RESTful Search Engine",
493
- "homepage_link": "https://www.elastic.co/products/elasticsearch",
494
- "github_topic_closest_fit": "search-engine",
495
- "category": "search engine"
496
- },
497
- {
498
- "repo_link": "https://github.com/kubernetes/kubernetes",
499
- "repo_name": "kubernetes",
500
- "github_about_section": "Production-Grade Container Scheduling and Management",
501
- "homepage_link": "https://kubernetes.io",
502
- "github_topic_closest_fit": "containers"
503
- },
504
- {
505
- "repo_link": "https://github.com/modelcontextprotocol/modelcontextprotocol",
506
- "repo_name": "modelcontextprotocol",
507
- "github_about_section": "Specification and documentation for the Model Context Protocol",
508
- "homepage_link": "https://modelcontextprotocol.io"
509
- },
510
- {
511
- "repo_link": "https://github.com/milvus-io/milvus",
512
- "repo_name": "milvus",
513
- "github_about_section": "Milvus is a high-performance, cloud-native vector database built for scalable vector ANN search",
514
- "homepage_link": "https://milvus.io",
515
- "github_topic_closest_fit": "vector-search",
516
- "category": "vector database"
517
- },
518
- {
519
- "repo_link": "https://github.com/gaoj0017/RaBitQ",
520
- "repo_name": "RaBitQ",
521
- "github_about_section": "[SIGMOD 2024] RaBitQ: Quantizing High-Dimensional Vectors with a Theoretical Error Bound for Approximate Nearest Neighbor Search",
522
- "homepage_link": "https://github.com/VectorDB-NTU/RaBitQ-Library",
523
- "github_topic_closest_fit": "nearest-neighbor-search"
524
- },
525
- {
526
- "repo_link": "https://github.com/Airtable/airtable.js",
527
- "repo_name": "airtable.js",
528
- "github_about_section": "Airtable javascript client"
529
- },
530
- {
531
- "repo_link": "https://github.com/mistralai/mistral-inference",
532
- "repo_name": "mistral-inference",
533
- "github_about_section": "Official inference library for Mistral models",
534
- "homepage_link": "https://mistral.ai/",
535
- "github_topic_closest_fit": "llm-inference",
536
- "category": "inference engine"
537
- },
538
- {
539
- "repo_link": "https://github.com/dstackai/dstack",
540
- "repo_name": "dstack",
541
- "github_about_section": "dstack is an open-source control plane for running development, training, and inference jobs on GPUs—across hyperscalers, neoclouds, or on-prem.",
542
- "homepage_link": "https://dstack.ai",
543
- "github_topic_closest_fit": "orchestration"
544
- },
545
- {
546
- "repo_link": "https://github.com/sandialabs/torchdendrite",
547
- "repo_name": "torchdendrite",
548
- "github_about_section": "Dendrites for PyTorch and SNNTorch neural networks",
549
- "category": "machine learning framework"
550
- },
551
- {
552
- "repo_link": "https://github.com/pytorch/torchtitan",
553
- "repo_name": "torchtitan",
554
- "github_about_section": "A PyTorch native platform for training generative AI models"
555
- },
556
- {
557
- "repo_link": "https://github.com/NVIDIA/cudnn-frontend",
558
- "repo_name": "cudnn-frontend",
559
- "github_about_section": "cudnn_frontend provides a c++ wrapper for the cudnn backend API and samples on how to use it"
560
- },
561
- {
562
- "repo_link": "https://github.com/pytorch/ort",
563
- "repo_name": "ort",
564
- "github_about_section": "Accelerate PyTorch models with ONNX Runtime"
565
- },
566
- {
567
- "repo_link": "https://github.com/sgl-project/ome",
568
- "repo_name": "ome",
569
- "github_about_section": "OME is a Kubernetes operator for enterprise-grade management and serving of Large Language Models (LLMs)",
570
- "homepage_link": "http://docs.sglang.ai/ome/",
571
- "github_topic_closest_fit": "k8s"
572
- },
573
- {
574
- "repo_link": "https://github.com/aws-neuron/neuronx-distributed-inference",
575
- "repo_name": "neuronx-distributed-inference"
576
- },
577
- {
578
- "repo_link": "https://github.com/meta-pytorch/monarch",
579
- "repo_name": "monarch",
580
- "github_about_section": "PyTorch Single Controller",
581
- "homepage_link": "https://meta-pytorch.org/monarch"
582
- },
583
- {
584
- "repo_link": "https://github.com/LMCache/LMCache",
585
- "repo_name": "LMCache",
586
- "github_about_section": "Supercharge Your LLM with the Fastest KV Cache Layer",
587
- "homepage_link": "https://lmcache.ai/",
588
- "github_topic_closest_fit": "inference"
589
- },
590
- {
591
- "repo_link": "https://github.com/linux-rdma/rdma-core",
592
- "repo_name": "rdma-core",
593
- "github_about_section": "RDMA core userspace libraries and daemons",
594
- "github_topic_closest_fit": "linux-kernel"
595
- },
596
- {
597
- "repo_link": "https://github.com/Cambridge-ICCS/FTorch",
598
- "repo_name": "FTorch",
599
- "github_about_section": "A library for directly calling PyTorch ML models from Fortran.",
600
- "homepage_link": "https://cambridge-iccs.github.io/FTorch/",
601
- "github_topic_closest_fit": "machine-learning"
602
- },
603
- {
604
- "repo_link": "https://github.com/facebook/hhvm",
605
- "repo_name": "hhvm",
606
- "github_about_section": "A virtual machine for executing programs written in Hack.",
607
- "homepage_link": "https://hhvm.com",
608
- "github_topic_closest_fit": "hack"
609
- },
610
- {
611
- "repo_link": "https://github.com/apache/spark",
612
- "repo_name": "spark",
613
- "github_about_section": "Apache Spark - A unified analytics engine for large-scale data processing",
614
- "homepage_link": "https://spark.apache.org/",
615
- "github_topic_closest_fit": "big-data"
616
- },
617
- {
618
- "repo_link": "https://github.com/ROCm/composable_kernel",
619
- "repo_name": "composable_kernel",
620
- "github_about_section": "Composable Kernel: Performance Portable Programming Model for Machine Learning Tensor Operators",
621
- "homepage_link": "https://rocm.docs.amd.com/projects/composable_kernel/en/latest/"
622
- },
623
- {
624
- "repo_link": "https://github.com/ROCm/aiter",
625
- "repo_name": "aiter",
626
- "github_about_section": "AI Tensor Engine for ROCm"
627
- },
628
- {
629
- "repo_link": "https://github.com/AMD-AGI/torchtitan",
630
- "repo_name": "torchtitan",
631
- "github_about_section": "A PyTorch native platform for training generative AI models"
632
- },
633
- {
634
- "repo_link": "https://github.com/AMD-AGI/hipBLASLt",
635
- "repo_name": "hipBLASLt",
636
- "category": "BLAS",
637
- "github_about_section": "hipBLASLt is a library that provides general matrix-matrix operations with a flexible API and extends functionalities beyond a traditional BLAS library",
638
- "homepage_link": "https://rocm.docs.amd.com/projects/hipBLASLt/en/latest/index.html"
639
- },
640
- {
641
- "repo_link": "https://github.com/AMD-AGI/rocm-torchtitan",
642
- "repo_name": "rocm-torchtitan"
643
- },
644
- {
645
- "repo_link": "https://github.com/HazyResearch/Megakernels",
646
- "repo_name": "Megakernels",
647
- "github_about_section": "kernels, of the mega variety"
648
- },
649
- {
650
- "repo_link": "https://github.com/opencv/opencv",
651
- "repo_name": "opencv",
652
- "github_about_section": "Open Source Computer Vision Library",
653
- "homepage_link": "https://opencv.org",
654
- "github_topic_closest_fit": "image-processing"
655
- },
656
- {
657
- "repo_link": "https://github.com/tracel-ai/burn",
658
- "repo_name": "burn",
659
- "github_about_section": "Burn is a next generation tensor library and Deep Learning Framework that doesn't compromise on flexibility, efficiency and portability.",
660
- "homepage_link": "https://burn.dev",
661
- "github_topic_closest_fit": "machine-learning"
662
- },
663
- {
664
- "repo_link": "https://github.com/OSC/ondemand",
665
- "repo_name": "ondemand",
666
- "github_about_section": "Supercomputing. Seamlessly. Open, Interactive HPC Via the Web",
667
- "homepage_link": "https://openondemand.org/",
668
- "github_topic_closest_fit": "hpc"
669
- },
670
- {
671
- "repo_link": "https://github.com/flashinfer-ai/flashinfer",
672
- "repo_name": "flashinfer",
673
- "github_about_section": "FlashInfer: Kernel Library for LLM Serving",
674
- "homepage_link": "https://flashinfer.ai",
675
- "github_topic_closest_fit": "attention"
676
- },
677
- {
678
- "repo_link": "https://github.com/AutomataLab/cuJSON",
679
- "repo_name": "cuJSON",
680
- "github_about_section": "cuJSON: A Highly Parallel JSON Parser for GPUs"
681
- },
682
- {
683
- "repo_link": "https://github.com/Netflix/metaflow",
684
- "repo_name": "metaflow",
685
- "github_about_section": "Build, Manage and Deploy AI/ML Systems",
686
- "homepage_link": "https://metaflow.org",
687
- "github_topic_closest_fit": "machine-learning"
688
- },
689
- {
690
- "repo_link": "https://github.com/harmonic-ai/IMO2025",
691
- "repo_name": "IMO2025"
692
- },
693
- {
694
- "repo_link": "https://github.com/leanprover/lean4",
695
- "repo_name": "lean4",
696
- "github_about_section": "Lean 4 programming language and theorem prover",
697
- "homepage_link": "https://lean-lang.org",
698
- "github_topic_closest_fit": "lean"
699
- },
700
- {
701
- "repo_link": "https://github.com/NVIDIA/warp",
702
- "repo_name": "warp",
703
- "github_about_section": "A Python framework for accelerated simulation, data generation and spatial computing.",
704
- "homepage_link": "https://nvidia.github.io/warp/",
705
- "github_topic_closest_fit": "gpu"
706
- },
707
- {
708
- "repo_link": "https://github.com/NVIDIA/cuda-python",
709
- "repo_name": "cuda-python",
710
- "category": "CUDA / OpenCL",
711
- "github_about_section": "CUDA Python: Performance meets Productivity",
712
- "homepage_link": "https://nvidia.github.io/cuda-python/"
713
- },
714
- {
715
- "repo_link": "https://github.com/basetenlabs/truss",
716
- "repo_name": "truss",
717
- "github_about_section": "The simplest way to serve AI/ML models in production",
718
- "homepage_link": "https://truss.baseten.co",
719
- "github_topic_closest_fit": "machine-learning"
720
- },
721
- {
722
- "repo_link": "https://github.com/kvcache-ai/Mooncake",
723
- "repo_name": "Mooncake",
724
- "github_about_section": "Mooncake is the serving platform for Kimi, a leading LLM service provided by Moonshot AI.",
725
- "homepage_link": "https://kvcache-ai.github.io/Mooncake/",
726
- "github_topic_closest_fit": "inference"
727
- },
728
- {
729
- "repo_name": "SYCL-Docs",
730
- "repo_link": "https://github.com/KhronosGroup/SYCL-Docs",
731
- "category": "CUDA / OpenCL",
732
- "github_about_section": "SYCL Open Source Specification",
733
- "github_topic_closest_fit": "opencl"
734
- },
735
- {
736
- "repo_name": "triSYCL",
737
- "repo_link": "https://github.com/triSYCL/triSYCL",
738
- "category": "CUDA / OpenCL",
739
- "github_about_section": "Generic system-wide modern C++ for heterogeneous platforms with SYCL from Khronos Group",
740
- "github_topic_closest_fit": "opencl"
741
- },
742
- {
743
- "repo_name": "AdaptiveCpp",
744
- "repo_link": "https://github.com/AdaptiveCpp/AdaptiveCpp",
745
- "category": "compiler",
746
- "github_about_section": "Compiler for multiple programming models (SYCL, C++ standard parallelism, HIP/CUDA) for CPUs and GPUs from all vendors: The independent, community-driven compiler for C++-based heterogeneous programming models. Lets applications adapt themselves to all the hardware in the system - even at runtime!",
747
- "homepage_link": "https://adaptivecpp.github.io",
748
- "github_topic_closest_fit": "compiler"
749
- },
750
- {
751
- "repo_name": "oneDPL",
752
- "repo_link": "https://github.com/uxlfoundation/oneDPL",
753
- "github_about_section": "oneAPI DPC++ Library (oneDPL)",
754
- "homepage_link": "https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/dpc-library.html"
755
- },
756
- {
757
- "repo_link": "https://github.com/pybind/pybind11",
758
- "repo_name": "pybind11",
759
- "github_about_section": "Seamless operability between C++11 and Python",
760
- "homepage_link": "https://pybind11.readthedocs.io/",
761
- "github_topic_closest_fit": "bindings"
762
- },
763
- {
764
- "repo_link": "https://github.com/andreinechaev/nvcc4jupyter",
765
- "repo_name": "nvcc4jupyter",
766
- "github_about_section": "A plugin for Jupyter Notebook to run CUDA C/C++ code",
767
- "category": "compiler"
768
- },
769
- {
770
- "repo_link": "https://github.com/Reference-LAPACK/lapack",
771
- "repo_name": "lapack",
772
- "github_about_section": "LAPACK development repository",
773
- "github_topic_closest_fit": "linear-algebra"
774
- },
775
- {
776
- "repo_link": "https://github.com/ccache/ccache",
777
- "repo_name": "ccache",
778
- "github_about_section": "ccache – a fast compiler cache",
779
- "homepage_link": "https://ccache.dev"
780
- },
781
- {
782
- "repo_name": "OpenCL-SDK",
783
- "repo_link": "https://github.com/KhronosGroup/OpenCL-SDK",
784
- "category": "CUDA / OpenCL",
785
- "github_about_section": "OpenCL SDK",
786
- "github_topic_closest_fit": "opencl"
787
- },
788
- {
789
- "repo_link": "https://github.com/meta-llama/synthetic-data-kit",
790
- "repo_name": "synthetic-data-kit",
791
- "github_about_section": "Tool for generating high quality Synthetic datasets",
792
- "homepage_link": "https://pypi.org/project/synthetic-data-kit/",
793
- "github_topic_closest_fit": "generation"
794
- },
795
- {
796
- "repo_link": "https://github.com/KhronosGroup/Vulkan-Docs",
797
- "repo_name": "Vulkan-Docs",
798
- "github_about_section": "The Vulkan API Specification and related tools"
799
- },
800
- {
801
- "repo_link": "https://github.com/tensorflow/tflite-micro",
802
- "repo_name": "tflite-micro",
803
- "github_about_section": "Infrastructure to enable deployment of ML models to low-power resource-constrained embedded targets (including microcontrollers and digital signal processors)."
804
- },
805
- {
806
- "repo_link": "https://github.com/Wan-Video/Wan2.2",
807
- "repo_name": "Wan2.2",
808
- "github_about_section": "Wan: Open and Advanced Large-Scale Video Generative Models",
809
- "homepage_link": "https://wan.video",
810
- "github_topic_closest_fit": "video-generation"
811
- },
812
- {
813
- "repo_link": "https://github.com/AMD-AGI/Primus-Turbo",
814
- "repo_name": "Primus-Turbo"
815
- },
816
- {
817
- "repo_link": "https://github.com/ROCm/hipBLAS",
818
- "repo_name": "hipBLAS",
819
- "category": "BLAS",
820
- "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
821
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
822
- "github_topic_closest_fit": "hip"
823
- },
824
- {
825
- "repo_link": "https://github.com/ROCm/roctracer",
826
- "repo_name": "roctracer",
827
- "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-systems repo",
828
- "homepage_link": "https://github.com/ROCm/rocm-systems"
829
- },
830
- {
831
- "repo_link": "https://github.com/ROCm/rocSOLVER",
832
- "repo_name": "rocSOLVER",
833
- "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
834
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
835
- "github_topic_closest_fit": "rocm"
836
- },
837
- {
838
- "repo_link": "https://github.com/ROCm/Tensile",
839
- "repo_name": "Tensile",
840
- "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
841
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
842
- "github_topic_closest_fit": "gpu"
843
- },
844
- {
845
- "repo_link": "https://github.com/ROCm/rocPRIM",
846
- "repo_name": "rocPRIM",
847
- "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
848
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
849
- "github_topic_closest_fit": "hip"
850
- },
851
- {
852
- "repo_link": "https://github.com/ROCm/hipCUB",
853
- "repo_name": "hipCUB",
854
- "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
855
- "homepage_link": "https://github.com/ROCm/rocm-libraries"
856
- },
857
- {
858
- "repo_link": "https://github.com/ROCm/rocFFT",
859
- "repo_name": "rocFFT",
860
- "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
861
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
862
- "github_topic_closest_fit": "hip"
863
- },
864
- {
865
- "repo_link": "https://github.com/ROCm/rocSPARSE",
866
- "repo_name": "rocSPARSE",
867
- "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
868
- "homepage_link": "https://github.com/ROCm/rocm-libraries"
869
- },
870
- {
871
- "repo_link": "https://github.com/ROCm/rocRAND",
872
- "repo_name": "rocRAND",
873
- "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
874
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
875
- "github_topic_closest_fit": "hip"
876
- },
877
- {
878
- "repo_link": "https://github.com/ROCm/MIOpen",
879
- "repo_name": "MIOpen",
880
- "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
881
- "homepage_link": "https://github.com/ROCm/rocm-libraries"
882
- }
883
  ]
 
1
  [
2
+ {
3
+ "repo_name": "goose",
4
+ "repo_link": "https://github.com/block/goose",
5
+ "category": "agent",
6
+ "github_about_section": "an open source, extensible AI agent that goes beyond code suggestions - install, execute, edit, and test with any LLM",
7
+ "homepage_link": "https://block.github.io/goose/",
8
+ "github_topic_closest_fit": "mcp"
9
+ },
10
+ {
11
+ "repo_name": "ray",
12
+ "repo_link": "https://github.com/ray-project/ray",
13
+ "category": "ai compute engine",
14
+ "github_about_section": "Ray is an AI compute engine. Ray consists of a core distributed runtime and a set of AI Libraries for accelerating ML workloads.",
15
+ "homepage_link": "https://ray.io",
16
+ "github_topic_closest_fit": "machine-learning"
17
+ },
18
+ {
19
+ "repo_name": "flashinfer-bench",
20
+ "repo_link": "https://github.com/flashinfer-ai/flashinfer-bench",
21
+ "category": "benchmark",
22
+ "github_about_section": "Building the Virtuous Cycle for AI-driven LLM Systems",
23
+ "homepage_link": "https://bench.flashinfer.ai"
24
+ },
25
+ {
26
+ "repo_name": "KernelBench",
27
+ "repo_link": "https://github.com/ScalingIntelligence/KernelBench",
28
+ "category": "benchmark",
29
+ "github_about_section": "KernelBench: Can LLMs Write GPU Kernels? - Benchmark with Torch -> CUDA problems",
30
+ "homepage_link": "https://scalingintelligence.stanford.edu/blogs/kernelbench/",
31
+ "github_topic_closest_fit": "benchmark"
32
+ },
33
+ {
34
+ "repo_name": "SWE-bench",
35
+ "repo_link": "https://github.com/SWE-bench/SWE-bench",
36
+ "category": "benchmark",
37
+ "github_about_section": "SWE-bench: Can Language Models Resolve Real-world Github Issues?",
38
+ "homepage_link": "https://www.swebench.com",
39
+ "github_topic_closest_fit": "benchmark"
40
+ },
41
+ {
42
+ "repo_name": "terminal-bench",
43
+ "repo_link": "https://github.com/laude-institute/terminal-bench",
44
+ "category": "benchmark",
45
+ "github_about_section": "A benchmark for LLMs on complicated tasks in the terminal",
46
+ "homepage_link": "https://www.tbench.ai"
47
+ },
48
+ {
49
+ "repo_name": "TritonBench",
50
+ "repo_link": "https://github.com/thunlp/TritonBench",
51
+ "category": "benchmark",
52
+ "github_about_section": "TritonBench: Benchmarking Large Language Model Capabilities for Generating Triton Operators"
53
+ },
54
+ {
55
+ "repo_name": "BitBLAS",
56
+ "repo_link": "https://github.com/microsoft/BitBLAS",
57
+ "category": "BLAS",
58
+ "github_about_section": "BitBLAS is a library to support mixed-precision matrix multiplications, especially for quantized LLM deployment."
59
+ },
60
+ {
61
+ "repo_name": "hipBLAS",
62
+ "repo_link": "https://github.com/ROCm/hipBLAS",
63
+ "category": "BLAS",
64
+ "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
65
+ "homepage_link": "https://github.com/ROCm/rocm-libraries",
66
+ "github_topic_closest_fit": "hip"
67
+ },
68
+ {
69
+ "repo_name": "hipBLASLt",
70
+ "repo_link": "https://github.com/AMD-AGI/hipBLASLt",
71
+ "category": "BLAS",
72
+ "github_about_section": "hipBLASLt is a library that provides general matrix-matrix operations with a flexible API and extends functionalities beyond a traditional BLAS library",
73
+ "homepage_link": "https://rocm.docs.amd.com/projects/hipBLASLt/en/latest/index.html"
74
+ },
75
+ {
76
+ "repo_name": "AdaptiveCpp",
77
+ "repo_link": "https://github.com/AdaptiveCpp/AdaptiveCpp",
78
+ "category": "compiler",
79
+ "github_about_section": "Compiler for multiple programming models (SYCL, C++ standard parallelism, HIP/CUDA) for CPUs and GPUs from all vendors: The independent, community-driven compiler for C++-based heterogeneous programming models. Lets applications adapt themselves to all the hardware in the system - even at runtime!",
80
+ "homepage_link": "https://adaptivecpp.github.io",
81
+ "github_topic_closest_fit": "compiler"
82
+ },
83
+ {
84
+ "repo_name": "llvm-project",
85
+ "repo_link": "https://github.com/llvm/llvm-project",
86
+ "category": "compiler",
87
+ "github_about_section": "The LLVM Project is a collection of modular and reusable compiler and toolchain technologies.",
88
+ "homepage_link": "http://llvm.org",
89
+ "github_topic_closest_fit": "compiler"
90
+ },
91
+ {
92
+ "repo_name": "numba",
93
+ "repo_link": "https://github.com/numba/numba",
94
+ "category": "compiler",
95
+ "github_about_section": "NumPy aware dynamic Python compiler using LLVM",
96
+ "homepage_link": "https://numba.pydata.org",
97
+ "github_topic_closest_fit": "compiler"
98
+ },
99
+ {
100
+ "repo_name": "nvcc4jupyter",
101
+ "repo_link": "https://github.com/andreinechaev/nvcc4jupyter",
102
+ "category": "compiler",
103
+ "github_about_section": "A plugin for Jupyter Notebook to run CUDA C/C++ code"
104
+ },
105
+ {
106
+ "repo_name": "CU2CL",
107
+ "repo_link": "https://github.com/vtsynergy/CU2CL",
108
+ "category": "CUDA / OpenCL",
109
+ "github_about_section": "A prototype CUDA-to-OpenCL source-to-source translator, built on the Clang compiler framework",
110
+ "homepage_link": "http://chrec.cs.vt.edu/cu2cl",
111
+ "github_topic_closest_fit": "opencl"
112
+ },
113
+ {
114
+ "repo_name": "cuda-python",
115
+ "repo_link": "https://github.com/NVIDIA/cuda-python",
116
+ "category": "CUDA / OpenCL",
117
+ "github_about_section": "CUDA Python: Performance meets Productivity",
118
+ "homepage_link": "https://nvidia.github.io/cuda-python/"
119
+ },
120
+ {
121
+ "repo_name": "OpenCL-SDK",
122
+ "repo_link": "https://github.com/KhronosGroup/OpenCL-SDK",
123
+ "category": "CUDA / OpenCL",
124
+ "github_about_section": "OpenCL SDK",
125
+ "github_topic_closest_fit": "opencl"
126
+ },
127
+ {
128
+ "repo_name": "pocl",
129
+ "repo_link": "https://github.com/pocl/pocl",
130
+ "category": "CUDA / OpenCL",
131
+ "github_about_section": "pocl - Portable Computing Language",
132
+ "homepage_link": "https://portablecl.org",
133
+ "github_topic_closest_fit": "opencl"
134
+ },
135
+ {
136
+ "repo_name": "SYCL-Docs",
137
+ "repo_link": "https://github.com/KhronosGroup/SYCL-Docs",
138
+ "category": "CUDA / OpenCL",
139
+ "github_about_section": "SYCL Open Source Specification",
140
+ "github_topic_closest_fit": "opencl"
141
+ },
142
+ {
143
+ "repo_name": "triSYCL",
144
+ "repo_link": "https://github.com/triSYCL/triSYCL",
145
+ "category": "CUDA / OpenCL",
146
+ "github_about_section": "Generic system-wide modern C++ for heterogeneous platforms with SYCL from Khronos Group",
147
+ "github_topic_closest_fit": "opencl"
148
+ },
149
+ {
150
+ "repo_name": "ZLUDA",
151
+ "repo_link": "https://github.com/vosen/ZLUDA",
152
+ "category": "CUDA / OpenCL",
153
+ "github_about_section": "CUDA on non-NVIDIA GPUs",
154
+ "homepage_link": "https://vosen.github.io/ZLUDA/",
155
+ "github_topic_closest_fit": "cuda"
156
+ },
157
+ {
158
+ "repo_name": "llama.cpp",
159
+ "repo_link": "https://github.com/ggml-org/llama.cpp",
160
+ "category": "inference engine",
161
+ "github_about_section": "LLM inference in C/C++",
162
+ "homepage_link": "https://ggml.ai",
163
+ "github_topic_closest_fit": "inference"
164
+ },
165
+ {
166
+ "repo_name": "mistral-inference",
167
+ "repo_link": "https://github.com/mistralai/mistral-inference",
168
+ "category": "inference engine",
169
+ "github_about_section": "Official inference library for Mistral models",
170
+ "homepage_link": "https://mistral.ai/",
171
+ "github_topic_closest_fit": "llm-inference"
172
+ },
173
+ {
174
+ "repo_name": "ollama",
175
+ "repo_link": "https://github.com/ollama/ollama",
176
+ "category": "inference engine",
177
+ "github_about_section": "Get up and running with OpenAI gpt-oss, DeepSeek-R1, Gemma 3 and other models.",
178
+ "homepage_link": "https://ollama.com",
179
+ "github_topic_closest_fit": "inference"
180
+ },
181
+ {
182
+ "repo_name": "sglang",
183
+ "repo_link": "https://github.com/sgl-project/sglang",
184
+ "category": "inference engine",
185
+ "github_about_section": "SGLang is a fast serving framework for large language models and vision language models.",
186
+ "homepage_link": "https://docs.sglang.ai",
187
+ "github_topic_closest_fit": "inference"
188
+ },
189
+ {
190
+ "repo_name": "TensorRT",
191
+ "repo_link": "https://github.com/NVIDIA/TensorRT",
192
+ "category": "inference engine",
193
+ "github_about_section": "NVIDIA TensorRT is an SDK for high-performance deep learning inference on NVIDIA GPUs. This repository contains the open source components of TensorRT.",
194
+ "homepage_link": "https://developer.nvidia.com/tensorrt",
195
+ "github_topic_closest_fit": "inference"
196
+ },
197
+ {
198
+ "repo_name": "vllm",
199
+ "repo_link": "https://github.com/vllm-project/vllm",
200
+ "category": "inference engine",
201
+ "github_about_section": "A high-throughput and memory-efficient inference and serving engine for LLMs",
202
+ "homepage_link": "https://docs.vllm.ai",
203
+ "github_topic_closest_fit": "inference"
204
+ },
205
+ {
206
+ "repo_name": "kernels",
207
+ "repo_link": "https://github.com/huggingface/kernels",
208
+ "category": "kernels",
209
+ "github_about_section": "Load compute kernels from the Hub"
210
+ },
211
+ {
212
+ "repo_name": "kernels-community",
213
+ "repo_link": "https://github.com/huggingface/kernels-community",
214
+ "category": "kernels",
215
+ "github_about_section": "Kernel sources for https://huggingface.co/kernels-community"
216
+ },
217
+ {
218
+ "repo_name": "Liger-Kernel",
219
+ "repo_link": "https://github.com/linkedin/Liger-Kernel",
220
+ "category": "kernels",
221
+ "github_about_section": "Efficient Triton Kernels for LLM Training",
222
+ "homepage_link": "https://openreview.net/pdf?id=36SjAIT42G",
223
+ "github_topic_closest_fit": "triton"
224
+ },
225
+ {
226
+ "repo_name": "quack",
227
+ "repo_link": "https://github.com/Dao-AILab/quack",
228
+ "category": "kernels",
229
+ "github_about_section": "A Quirky Assortment of CuTe Kernels"
230
+ },
231
+ {
232
+ "repo_name": "reference-kernels",
233
+ "repo_link": "https://github.com/gpu-mode/reference-kernels",
234
+ "category": "kernels",
235
+ "github_about_section": "Official Problem Sets / Reference Kernels for the GPU MODE Leaderboard!",
236
+ "homepage_link": "https://gpumode.com",
237
+ "github_topic_closest_fit": "gpu"
238
+ },
239
+ {
240
+ "repo_name": "pytorch",
241
+ "repo_link": "https://github.com/pytorch/pytorch",
242
+ "category": "machine learning framework",
243
+ "github_about_section": "Tensors and Dynamic neural networks in Python with strong GPU acceleration",
244
+ "homepage_link": "https://pytorch.org",
245
+ "github_topic_closest_fit": "machine-learning"
246
+ },
247
+ {
248
+ "repo_name": "tensorflow",
249
+ "repo_link": "https://github.com/tensorflow/tensorflow",
250
+ "category": "machine learning framework",
251
+ "github_about_section": "An Open Source Machine Learning Framework for Everyone",
252
+ "homepage_link": "https://tensorflow.org",
253
+ "github_topic_closest_fit": "machine-learning"
254
+ },
255
+ {
256
+ "repo_name": "torchdendrite",
257
+ "repo_link": "https://github.com/sandialabs/torchdendrite",
258
+ "category": "machine learning framework",
259
+ "github_about_section": "Dendrites for PyTorch and SNNTorch neural networks"
260
+ },
261
+ {
262
+ "repo_name": "onnx",
263
+ "repo_link": "https://github.com/onnx/onnx",
264
+ "category": "machine learning interoperability",
265
+ "github_about_section": "Open standard for machine learning interoperability",
266
+ "homepage_link": "https://onnx.ai",
267
+ "github_topic_closest_fit": "onnx"
268
+ },
269
+ {
270
+ "repo_name": "executorch",
271
+ "repo_link": "https://github.com/pytorch/executorch",
272
+ "category": "model compiler",
273
+ "github_about_section": "On-device AI across mobile, embedded and edge for PyTorch",
274
+ "homepage_link": "https://executorch.ai",
275
+ "github_topic_closest_fit": "compiler"
276
+ },
277
+ {
278
+ "repo_name": "cutlass",
279
+ "repo_link": "https://github.com/NVIDIA/cutlass",
280
+ "category": "parallel computing",
281
+ "github_about_section": "CUDA Templates and Python DSLs for High-Performance Linear Algebra",
282
+ "homepage_link": "https://docs.nvidia.com/cutlass/index.html",
283
+ "github_topic_closest_fit": "parallel-programming"
284
+ },
285
+ {
286
+ "repo_name": "ThunderKittens",
287
+ "repo_link": "https://github.com/HazyResearch/ThunderKittens",
288
+ "category": "parallel computing",
289
+ "github_about_section": "Tile primitives for speedy kernels",
290
+ "homepage_link": "https://hazyresearch.stanford.edu/blog/2024-10-29-tk2",
291
+ "github_topic_closest_fit": "parallel-programming"
292
+ },
293
+ {
294
+ "repo_name": "helion",
295
+ "repo_link": "https://github.com/pytorch/helion",
296
+ "category": "parallel computing dsl",
297
+ "github_about_section": "A Python-embedded DSL that makes it easy to write fast, scalable ML kernels with minimal boilerplate.",
298
+ "homepage_link": "https://helionlang.com",
299
+ "github_topic_closest_fit": "parallel-programming"
300
+ },
301
+ {
302
+ "repo_name": "TileIR",
303
+ "repo_link": "https://github.com/microsoft/TileIR",
304
+ "category": "parallel computing dsl",
305
+ "github_about_section": "TileIR (tile-ir) is a concise domain-specific IR designed to streamline the development of high-performance GPU/CPU kernels (e.g., GEMM, Dequant GEMM, FlashAttention, LinearAttention). By employing a Pythonic syntax with an underlying compiler infrastructure on top of TVM, TileIR allows developers to focus on productivity without sacrificing the low-level optimizations necessary for state-of-the-art performance.",
306
+ "github_topic_closest_fit": "parallel-programming"
307
+ },
308
+ {
309
+ "repo_name": "tilelang",
310
+ "repo_link": "https://github.com/tile-ai/tilelang",
311
+ "category": "parallel computing dsl",
312
+ "github_about_section": "Domain-specific language designed to streamline the development of high-performance GPU/CPU/Accelerators kernels",
313
+ "homepage_link": "https://tilelang.com",
314
+ "github_topic_closest_fit": "parallel-programming"
315
+ },
316
+ {
317
+ "repo_name": "triton",
318
+ "repo_link": "https://github.com/triton-lang/triton",
319
+ "category": "parallel computing dsl",
320
+ "github_about_section": "Development repository for the Triton language and compiler",
321
+ "homepage_link": "https://triton-lang.org/",
322
+ "github_topic_closest_fit": "parallel-programming"
323
+ },
324
+ {
325
+ "repo_name": "cupti",
326
+ "repo_link": "https://github.com/cwpearson/cupti",
327
+ "category": "performance",
328
+ "github_about_section": "Profile how CUDA applications create and modify data in memory.",
329
+ "github_topic_closest_fit": "profiling"
330
+ },
331
+ {
332
+ "repo_name": "hatchet",
333
+ "repo_link": "https://github.com/LLNL/hatchet",
334
+ "category": "performance",
335
+ "github_about_section": "Graph-indexed Pandas DataFrames for analyzing hierarchical performance data",
336
+ "homepage_link": "https://llnl-hatchet.readthedocs.io",
337
+ "github_topic_closest_fit": "profiling"
338
+ },
339
+ {
340
+ "repo_name": "intelliperf",
341
+ "repo_link": "https://github.com/AMDResearch/intelliperf",
342
+ "category": "performance",
343
+ "github_about_section": "Automated bottleneck detection and solution orchestration",
344
+ "homepage_link": "https://arxiv.org/html/2508.20258v1",
345
+ "github_topic_closest_fit": "profiling"
346
+ },
347
+ {
348
+ "repo_name": "omnitrace",
349
+ "repo_link": "https://github.com/ROCm/omnitrace",
350
+ "category": "performance testing",
351
+ "github_about_section": "Omnitrace: Application Profiling, Tracing, and Analysis",
352
+ "homepage_link": "https://rocm.docs.amd.com/projects/omnitrace/en/docs-6.2.4",
353
+ "github_topic_closest_fit": "profiling"
354
+ },
355
+ {
356
+ "repo_name": "jax",
357
+ "repo_link": "https://github.com/jax-ml/jax",
358
+ "category": "scientific computing",
359
+ "github_about_section": "Composable transformations of Python+NumPy programs: differentiate, vectorize, JIT to GPU/TPU, and more",
360
+ "homepage_link": "https://docs.jax.dev",
361
+ "github_topic_closest_fit": "scientific-computing"
362
+ },
363
+ {
364
+ "repo_name": "numpy",
365
+ "repo_link": "https://github.com/numpy/numpy",
366
+ "category": "scientific computing",
367
+ "github_about_section": "The fundamental package for scientific computing with Python.",
368
+ "homepage_link": "https://numpy.org",
369
+ "github_topic_closest_fit": "scientific-computing"
370
+ },
371
+ {
372
+ "repo_name": "scipy",
373
+ "repo_link": "https://github.com/scipy/scipy",
374
+ "category": "scientific computing",
375
+ "github_about_section": "SciPy library main repository",
376
+ "homepage_link": "https://scipy.org",
377
+ "github_topic_closest_fit": "scientific-computing"
378
+ },
379
+ {
380
+ "repo_name": "elasticsearch",
381
+ "repo_link": "https://github.com/elastic/elasticsearch",
382
+ "category": "search engine",
383
+ "github_about_section": "Free and Open Source, Distributed, RESTful Search Engine",
384
+ "homepage_link": "https://www.elastic.co/products/elasticsearch",
385
+ "github_topic_closest_fit": "search-engine"
386
+ },
387
+ {
388
+ "repo_name": "jupyterlab",
389
+ "repo_link": "https://github.com/jupyterlab/jupyterlab",
390
+ "category": "ui",
391
+ "github_about_section": "JupyterLab computational environment.",
392
+ "homepage_link": "https://jupyterlab.readthedocs.io/",
393
+ "github_topic_closest_fit": "jupyter"
394
+ },
395
+ {
396
+ "repo_name": "milvus",
397
+ "repo_link": "https://github.com/milvus-io/milvus",
398
+ "category": "vector database",
399
+ "github_about_section": "Milvus is a high-performance, cloud-native vector database built for scalable vector ANN search",
400
+ "homepage_link": "https://milvus.io",
401
+ "github_topic_closest_fit": "vector-search"
402
+ },
403
+ {
404
+ "repo_name": "accelerate",
405
+ "repo_link": "https://github.com/huggingface/accelerate",
406
+ "github_about_section": "A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support.",
407
+ "homepage_link": "https://huggingface.co/docs/accelerate"
408
+ },
409
+ {
410
+ "repo_name": "airtable.js",
411
+ "repo_link": "https://github.com/Airtable/airtable.js",
412
+ "github_about_section": "Airtable javascript client"
413
+ },
414
+ {
415
+ "repo_name": "aiter",
416
+ "repo_link": "https://github.com/ROCm/aiter",
417
+ "github_about_section": "AI Tensor Engine for ROCm"
418
+ },
419
+ {
420
+ "repo_name": "ao",
421
+ "repo_link": "https://github.com/pytorch/ao",
422
+ "github_about_section": "PyTorch native quantization and sparsity for training and inference",
423
+ "homepage_link": "https://pytorch.org/ao/stable/index.html",
424
+ "github_topic_closest_fit": "quantization"
425
+ },
426
+ {
427
+ "repo_name": "burn",
428
+ "repo_link": "https://github.com/tracel-ai/burn",
429
+ "github_about_section": "Burn is a next generation tensor library and Deep Learning Framework that doesn't compromise on flexibility, efficiency and portability.",
430
+ "homepage_link": "https://burn.dev",
431
+ "github_topic_closest_fit": "machine-learning"
432
+ },
433
+ {
434
+ "repo_name": "ccache",
435
+ "repo_link": "https://github.com/ccache/ccache",
436
+ "github_about_section": "ccache - a fast compiler cache",
437
+ "homepage_link": "https://ccache.dev"
438
+ },
439
+ {
440
+ "repo_name": "ComfyUI",
441
+ "repo_link": "https://github.com/comfyanonymous/ComfyUI",
442
+ "github_about_section": "The most powerful and modular diffusion model GUI, api and backend with a graph/nodes interface.",
443
+ "homepage_link": "https://www.comfy.org/",
444
+ "github_topic_closest_fit": "stable-diffusion"
445
+ },
446
+ {
447
+ "repo_name": "composable_kernel",
448
+ "repo_link": "https://github.com/ROCm/composable_kernel",
449
+ "github_about_section": "Composable Kernel: Performance Portable Programming Model for Machine Learning Tensor Operators",
450
+ "homepage_link": "https://rocm.docs.amd.com/projects/composable_kernel/en/latest/"
451
+ },
452
+ {
453
+ "repo_name": "cudnn-frontend",
454
+ "repo_link": "https://github.com/NVIDIA/cudnn-frontend",
455
+ "github_about_section": "cudnn_frontend provides a c++ wrapper for the cudnn backend API and samples on how to use it"
456
+ },
457
+ {
458
+ "repo_name": "cuJSON",
459
+ "repo_link": "https://github.com/AutomataLab/cuJSON",
460
+ "github_about_section": "cuJSON: A Highly Parallel JSON Parser for GPUs"
461
+ },
462
+ {
463
+ "repo_name": "DeepSpeed",
464
+ "repo_link": "https://github.com/deepspeedai/DeepSpeed",
465
+ "github_about_section": "DeepSpeed is a deep learning optimization library that makes distributed training and inference easy, efficient, and effective.",
466
+ "homepage_link": "https://www.deepspeed.ai/",
467
+ "github_topic_closest_fit": "gpu"
468
+ },
469
+ {
470
+ "repo_name": "dstack",
471
+ "repo_link": "https://github.com/dstackai/dstack",
472
+ "github_about_section": "dstack is an open-source control plane for running development, training, and inference jobs on GPUs-across hyperscalers, neoclouds, or on-prem.",
473
+ "homepage_link": "https://dstack.ai",
474
+ "github_topic_closest_fit": "orchestration"
475
+ },
476
+ {
477
+ "repo_name": "flashinfer",
478
+ "repo_link": "https://github.com/flashinfer-ai/flashinfer",
479
+ "github_about_section": "FlashInfer: Kernel Library for LLM Serving",
480
+ "homepage_link": "https://flashinfer.ai",
481
+ "github_topic_closest_fit": "attention"
482
+ },
483
+ {
484
+ "repo_name": "FTorch",
485
+ "repo_link": "https://github.com/Cambridge-ICCS/FTorch",
486
+ "github_about_section": "A library for directly calling PyTorch ML models from Fortran.",
487
+ "homepage_link": "https://cambridge-iccs.github.io/FTorch/",
488
+ "github_topic_closest_fit": "machine-learning"
489
+ },
490
+ {
491
+ "repo_name": "GEAK-agent",
492
+ "repo_link": "https://github.com/AMD-AGI/GEAK-agent",
493
+ "github_about_section": "It is an LLM-based AI agent, which can write correct and efficient gpu kernels automatically."
494
+ },
495
+ {
496
+ "repo_name": "hhvm",
497
+ "repo_link": "https://github.com/facebook/hhvm",
498
+ "github_about_section": "A virtual machine for executing programs written in Hack.",
499
+ "homepage_link": "https://hhvm.com",
500
+ "github_topic_closest_fit": "hack"
501
+ },
502
+ {
503
+ "repo_name": "hip",
504
+ "repo_link": "https://github.com/ROCm/hip",
505
+ "github_about_section": "HIP: C++ Heterogeneous-Compute Interface for Portability",
506
+ "homepage_link": "https://rocmdocs.amd.com/projects/HIP/",
507
+ "github_topic_closest_fit": "hip"
508
+ },
509
+ {
510
+ "repo_name": "hipCUB",
511
+ "repo_link": "https://github.com/ROCm/hipCUB",
512
+ "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
513
+ "homepage_link": "https://github.com/ROCm/rocm-libraries"
514
+ },
515
+ {
516
+ "repo_name": "IMO2025",
517
+ "repo_link": "https://github.com/harmonic-ai/IMO2025"
518
+ },
519
+ {
520
+ "repo_name": "kubernetes",
521
+ "repo_link": "https://github.com/kubernetes/kubernetes",
522
+ "github_about_section": "Production-Grade Container Scheduling and Management",
523
+ "homepage_link": "https://kubernetes.io",
524
+ "github_topic_closest_fit": "containers"
525
+ },
526
+ {
527
+ "repo_name": "lapack",
528
+ "repo_link": "https://github.com/Reference-LAPACK/lapack",
529
+ "github_about_section": "LAPACK development repository",
530
+ "github_topic_closest_fit": "linear-algebra"
531
+ },
532
+ {
533
+ "repo_name": "lean4",
534
+ "repo_link": "https://github.com/leanprover/lean4",
535
+ "github_about_section": "Lean 4 programming language and theorem prover",
536
+ "homepage_link": "https://lean-lang.org",
537
+ "github_topic_closest_fit": "lean"
538
+ },
539
+ {
540
+ "repo_name": "letta",
541
+ "repo_link": "https://github.com/letta-ai/letta",
542
+ "github_about_section": "Letta is the platform for building stateful agents: open AI with advanced memory that can learn and self-improve over time.",
543
+ "homepage_link": "https://docs.letta.com/",
544
+ "github_topic_closest_fit": "ai-agents"
545
+ },
546
+ {
547
+ "repo_name": "lightning-thunder",
548
+ "repo_link": "https://github.com/Lightning-AI/lightning-thunder",
549
+ "github_about_section": "PyTorch compiler that accelerates training and inference. Get built-in optimizations for performance, memory, parallelism, and easily write your own."
550
+ },
551
+ {
552
+ "repo_name": "LMCache",
553
+ "repo_link": "https://github.com/LMCache/LMCache",
554
+ "github_about_section": "Supercharge Your LLM with the Fastest KV Cache Layer",
555
+ "homepage_link": "https://lmcache.ai/",
556
+ "github_topic_closest_fit": "inference"
557
+ },
558
+ {
559
+ "repo_name": "mcp-agent",
560
+ "repo_link": "https://github.com/lastmile-ai/mcp-agent",
561
+ "github_about_section": "Build effective agents using Model Context Protocol and simple workflow patterns",
562
+ "github_topic_closest_fit": "ai-agents"
563
+ },
564
+ {
565
+ "repo_name": "Megakernels",
566
+ "repo_link": "https://github.com/HazyResearch/Megakernels",
567
+ "github_about_section": "kernels, of the mega variety"
568
+ },
569
+ {
570
+ "repo_name": "metaflow",
571
+ "repo_link": "https://github.com/Netflix/metaflow",
572
+ "github_about_section": "Build, Manage and Deploy AI/ML Systems",
573
+ "homepage_link": "https://metaflow.org",
574
+ "github_topic_closest_fit": "machine-learning"
575
+ },
576
+ {
577
+ "repo_name": "MIOpen",
578
+ "repo_link": "https://github.com/ROCm/MIOpen",
579
+ "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
580
+ "homepage_link": "https://github.com/ROCm/rocm-libraries"
581
+ },
582
+ {
583
+ "repo_name": "modelcontextprotocol",
584
+ "repo_link": "https://github.com/modelcontextprotocol/modelcontextprotocol",
585
+ "github_about_section": "Specification and documentation for the Model Context Protocol",
586
+ "homepage_link": "https://modelcontextprotocol.io"
587
+ },
588
+ {
589
+ "repo_name": "modular",
590
+ "repo_link": "https://github.com/modular/modular",
591
+ "github_about_section": "The Modular Platform (includes MAX & Mojo)",
592
+ "homepage_link": "https://docs.modular.com/",
593
+ "github_topic_closest_fit": "mojo"
594
+ },
595
+ {
596
+ "repo_name": "monarch",
597
+ "repo_link": "https://github.com/meta-pytorch/monarch",
598
+ "github_about_section": "PyTorch Single Controller",
599
+ "homepage_link": "https://meta-pytorch.org/monarch"
600
+ },
601
+ {
602
+ "repo_name": "Mooncake",
603
+ "repo_link": "https://github.com/kvcache-ai/Mooncake",
604
+ "github_about_section": "Mooncake is the serving platform for Kimi, a leading LLM service provided by Moonshot AI.",
605
+ "homepage_link": "https://kvcache-ai.github.io/Mooncake/",
606
+ "github_topic_closest_fit": "inference"
607
+ },
608
+ {
609
+ "repo_name": "nccl",
610
+ "repo_link": "https://github.com/NVIDIA/nccl",
611
+ "github_about_section": "Optimized primitives for collective multi-GPU communication",
612
+ "homepage_link": "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html"
613
+ },
614
+ {
615
+ "repo_name": "neuronx-distributed-inference",
616
+ "repo_link": "https://github.com/aws-neuron/neuronx-distributed-inference"
617
+ },
618
+ {
619
+ "repo_name": "nixl",
620
+ "repo_link": "https://github.com/ai-dynamo/nixl",
621
+ "github_about_section": "NVIDIA Inference Xfer Library (NIXL)"
622
+ },
623
+ {
624
+ "repo_name": "ome",
625
+ "repo_link": "https://github.com/sgl-project/ome",
626
+ "github_about_section": "OME is a Kubernetes operator for enterprise-grade management and serving of Large Language Models (LLMs)",
627
+ "homepage_link": "https://docs.sglang.ai/ome/",
628
+ "github_topic_closest_fit": "k8s"
629
+ },
630
+ {
631
+ "repo_name": "ondemand",
632
+ "repo_link": "https://github.com/OSC/ondemand",
633
+ "github_about_section": "Supercomputing. Seamlessly. Open, Interactive HPC Via the Web",
634
+ "homepage_link": "https://openondemand.org/",
635
+ "github_topic_closest_fit": "hpc"
636
+ },
637
+ {
638
+ "repo_name": "oneDPL",
639
+ "repo_link": "https://github.com/uxlfoundation/oneDPL",
640
+ "github_about_section": "oneAPI DPC++ Library (oneDPL)",
641
+ "homepage_link": "https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/dpc-library.html"
642
+ },
643
+ {
644
+ "repo_name": "openevolve",
645
+ "repo_link": "https://github.com/codelion/openevolve",
646
+ "github_about_section": "Open-source implementation of AlphaEvolve",
647
+ "github_topic_closest_fit": "genetic-algorithm"
648
+ },
649
+ {
650
+ "repo_name": "ort",
651
+ "repo_link": "https://github.com/pytorch/ort",
652
+ "github_about_section": "Accelerate PyTorch models with ONNX Runtime"
653
+ },
654
+ {
655
+ "repo_name": "peft",
656
+ "repo_link": "https://github.com/huggingface/peft",
657
+ "github_about_section": "PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.",
658
+ "homepage_link": "https://huggingface.co/docs/peft",
659
+ "github_topic_closest_fit": "lora"
660
+ },
661
+ {
662
+ "repo_name": "Primus-Turbo",
663
+ "repo_link": "https://github.com/AMD-AGI/Primus-Turbo"
664
+ },
665
+ {
666
+ "repo_name": "pybind11",
667
+ "repo_link": "https://github.com/pybind/pybind11",
668
+ "github_about_section": "Seamless operability between C++11 and Python",
669
+ "homepage_link": "https://pybind11.readthedocs.io/",
670
+ "github_topic_closest_fit": "bindings"
671
+ },
672
+ {
673
+ "repo_name": "RaBitQ",
674
+ "repo_link": "https://github.com/gaoj0017/RaBitQ",
675
+ "github_about_section": "[SIGMOD 2024] RaBitQ: Quantizing High-Dimensional Vectors with a Theoretical Error Bound for Approximate Nearest Neighbor Search",
676
+ "homepage_link": "https://github.com/VectorDB-NTU/RaBitQ-Library",
677
+ "github_topic_closest_fit": "nearest-neighbor-search"
678
+ },
679
+ {
680
+ "repo_name": "rdma-core",
681
+ "repo_link": "https://github.com/linux-rdma/rdma-core",
682
+ "github_about_section": "RDMA core userspace libraries and daemons",
683
+ "github_topic_closest_fit": "linux-kernel"
684
+ },
685
+ {
686
+ "repo_name": "rocFFT",
687
+ "repo_link": "https://github.com/ROCm/rocFFT",
688
+ "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
689
+ "homepage_link": "https://github.com/ROCm/rocm-libraries",
690
+ "github_topic_closest_fit": "hip"
691
+ },
692
+ {
693
+ "repo_name": "ROCm",
694
+ "repo_link": "https://github.com/ROCm/ROCm",
695
+ "github_about_section": "AMD ROCm Software - GitHub Home",
696
+ "homepage_link": "https://rocm.docs.amd.com",
697
+ "github_topic_closest_fit": "documentation"
698
+ },
699
+ {
700
+ "repo_name": "rocm-systems",
701
+ "repo_link": "https://github.com/ROCm/rocm-systems",
702
+ "github_about_section": "super repo for rocm systems projects"
703
+ },
704
+ {
705
+ "repo_name": "rocPRIM",
706
+ "repo_link": "https://github.com/ROCm/rocPRIM",
707
+ "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
708
+ "homepage_link": "https://github.com/ROCm/rocm-libraries",
709
+ "github_topic_closest_fit": "hip"
710
+ },
711
+ {
712
+ "repo_name": "rocRAND",
713
+ "repo_link": "https://github.com/ROCm/rocRAND",
714
+ "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
715
+ "homepage_link": "https://github.com/ROCm/rocm-libraries",
716
+ "github_topic_closest_fit": "hip"
717
+ },
718
+ {
719
+ "repo_name": "rocSOLVER",
720
+ "repo_link": "https://github.com/ROCm/rocSOLVER",
721
+ "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
722
+ "homepage_link": "https://github.com/ROCm/rocm-libraries",
723
+ "github_topic_closest_fit": "rocm"
724
+ },
725
+ {
726
+ "repo_name": "rocSPARSE",
727
+ "repo_link": "https://github.com/ROCm/rocSPARSE",
728
+ "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
729
+ "homepage_link": "https://github.com/ROCm/rocm-libraries"
730
+ },
731
+ {
732
+ "repo_name": "roctracer",
733
+ "repo_link": "https://github.com/ROCm/roctracer",
734
+ "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-systems repo",
735
+ "homepage_link": "https://github.com/ROCm/rocm-systems"
736
+ },
737
+ {
738
+ "repo_name": "Self-Forcing",
739
+ "repo_link": "https://github.com/guandeh17/Self-Forcing",
740
+ "github_about_section": "Official codebase for \"Self Forcing: Bridging Training and Inference in Autoregressive Video Diffusion\" (NeurIPS 2025 Spotlight)"
741
+ },
742
+ {
743
+ "repo_name": "server",
744
+ "repo_link": "https://github.com/triton-inference-server/server",
745
+ "github_about_section": "The Triton Inference Server provides an optimized cloud and edge inferencing solution.",
746
+ "homepage_link": "https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html",
747
+ "github_topic_closest_fit": "inference"
748
+ },
749
+ {
750
+ "repo_name": "spark",
751
+ "repo_link": "https://github.com/apache/spark",
752
+ "github_about_section": "Apache Spark - A unified analytics engine for large-scale data processing",
753
+ "homepage_link": "https://spark.apache.org/",
754
+ "github_topic_closest_fit": "big-data"
755
+ },
756
+ {
757
+ "repo_name": "StreamDiffusion",
758
+ "repo_link": "https://github.com/cumulo-autumn/StreamDiffusion",
759
+ "github_about_section": "StreamDiffusion: A Pipeline-Level Solution for Real-Time Interactive Generation"
760
+ },
761
+ {
762
+ "repo_name": "streamv2v",
763
+ "repo_link": "https://github.com/Jeff-LiangF/streamv2v",
764
+ "github_about_section": "Official Pytorch implementation of StreamV2V.",
765
+ "homepage_link": "https://jeff-liangf.github.io/projects/streamv2v/"
766
+ },
767
+ {
768
+ "repo_name": "synthetic-data-kit",
769
+ "repo_link": "https://github.com/meta-llama/synthetic-data-kit",
770
+ "github_about_section": "Tool for generating high quality Synthetic datasets",
771
+ "homepage_link": "https://pypi.org/project/synthetic-data-kit/",
772
+ "github_topic_closest_fit": "generation"
773
+ },
774
+ {
775
+ "repo_name": "Tensile",
776
+ "repo_link": "https://github.com/ROCm/Tensile",
777
+ "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
778
+ "homepage_link": "https://github.com/ROCm/rocm-libraries",
779
+ "github_topic_closest_fit": "gpu"
780
+ },
781
+ {
782
+ "repo_name": "tflite-micro",
783
+ "repo_link": "https://github.com/tensorflow/tflite-micro",
784
+ "github_about_section": "Infrastructure to enable deployment of ML models to low-power resource-constrained embedded targets (including microcontrollers and digital signal processors)."
785
+ },
786
+ {
787
+ "repo_name": "torchdynamo",
788
+ "repo_link": "https://github.com/pytorch/torchdynamo",
789
+ "github_about_section": "A Python-level JIT compiler designed to make unmodified PyTorch programs faster."
790
+ },
791
+ {
792
+ "repo_name": "torchtitan",
793
+ "repo_link": "https://github.com/pytorch/torchtitan",
794
+ "github_about_section": "A PyTorch native platform for training generative AI models"
795
+ },
796
+ {
797
+ "repo_name": "torchtitan",
798
+ "repo_link": "https://github.com/AMD-AGI/torchtitan",
799
+ "github_about_section": "A PyTorch native platform for training generative AI models"
800
+ },
801
+ {
802
+ "repo_name": "transformers",
803
+ "repo_link": "https://github.com/huggingface/transformers",
804
+ "github_about_section": "Transformers: the model-definition framework for state-of-the-art machine learning models in text, vision, audio, and multimodal models, for both inference and training.",
805
+ "homepage_link": "https://huggingface.co/transformers"
806
+ },
807
+ {
808
+ "repo_name": "Triton-distributed",
809
+ "repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
810
+ "github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
811
+ "homepage_link": "https://triton-distributed.readthedocs.io/en/latest"
812
+ },
813
+ {
814
+ "repo_name": "triton-runner",
815
+ "repo_link": "https://github.com/toyaix/triton-runner",
816
+ "github_about_section": "Multi-Level Triton Runner supporting Python, IR, PTX, and cubin.",
817
+ "homepage_link": "https://triton-runner.org",
818
+ "github_topic_closest_fit": "triton"
819
+ },
820
+ {
821
+ "repo_name": "tritonparse",
822
+ "repo_link": "https://github.com/meta-pytorch/tritonparse",
823
+ "github_about_section": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels",
824
+ "homepage_link": "https://meta-pytorch.org/tritonparse/",
825
+ "github_topic_closest_fit": "triton"
826
+ },
827
+ {
828
+ "repo_name": "trl",
829
+ "repo_link": "https://github.com/huggingface/trl",
830
+ "github_about_section": "Train transformer language models with reinforcement learning.",
831
+ "homepage_link": "https://hf.co/docs/trl"
832
+ },
833
+ {
834
+ "repo_name": "truss",
835
+ "repo_link": "https://github.com/basetenlabs/truss",
836
+ "github_about_section": "The simplest way to serve AI/ML models in production",
837
+ "homepage_link": "https://truss.baseten.co",
838
+ "github_topic_closest_fit": "machine-learning"
839
+ },
840
+ {
841
+ "repo_name": "unsloth",
842
+ "repo_link": "https://github.com/unslothai/unsloth",
843
+ "github_about_section": "Fine-tuning & Reinforcement Learning for LLMs. Train OpenAI gpt-oss, DeepSeek-R1, Qwen3, Gemma 3, TTS 2x faster with 70% less VRAM.",
844
+ "homepage_link": "https://docs.unsloth.ai"
845
+ },
846
+ {
847
+ "repo_name": "verl",
848
+ "repo_link": "https://github.com/volcengine/verl",
849
+ "github_about_section": "verl: Volcano Engine Reinforcement Learning for LLMs",
850
+ "homepage_link": "https://verl.readthedocs.io/en/latest/index.html"
851
+ },
852
+ {
853
+ "repo_name": "Vulkan-Docs",
854
+ "repo_link": "https://github.com/KhronosGroup/Vulkan-Docs",
855
+ "github_about_section": "The Vulkan API Specification and related tools"
856
+ },
857
+ {
858
+ "repo_name": "Wan2.2",
859
+ "repo_link": "https://github.com/Wan-Video/Wan2.2",
860
+ "github_about_section": "Wan: Open and Advanced Large-Scale Video Generative Models",
861
+ "homepage_link": "https://wan.video",
862
+ "github_topic_closest_fit": "video-generation"
863
+ },
864
+ {
865
+ "repo_name": "warp",
866
+ "repo_link": "https://github.com/NVIDIA/warp",
867
+ "github_about_section": "A Python framework for accelerated simulation, data generation and spatial computing.",
868
+ "homepage_link": "https://nvidia.github.io/warp/",
869
+ "github_topic_closest_fit": "gpu"
870
+ }
 
 
 
 
 
 
 
 
 
 
 
 
871
  ]