TylerHilbert commited on
Commit
a2b2115
·
1 Parent(s): 67d7823

Removed projects that may not have been a focus.

Browse files
PyTorchConference2025_GithubRepos.json CHANGED
@@ -321,17 +321,6 @@
321
  "contributors_2024": 18,
322
  "contributors_2023": 0
323
  },
324
- {
325
- "repo_name": "composable_kernel",
326
- "repo_link": "https://github.com/ROCm/composable_kernel",
327
- "category": "gpu kernels",
328
- "github_about_section": "Composable Kernel: Performance Portable Programming Model for Machine Learning Tensor Operators",
329
- "homepage_link": "https://rocm.docs.amd.com/projects/composable_kernel",
330
- "contributors_all": 190,
331
- "contributors_2025": 140,
332
- "contributors_2024": 58,
333
- "contributors_2023": 33
334
- },
335
  {
336
  "repo_name": "Mooncake",
337
  "repo_link": "https://github.com/kvcache-ai/Mooncake",
@@ -367,18 +356,6 @@
367
  "contributors_2024": 100,
368
  "contributors_2023": 5
369
  },
370
- {
371
- "repo_name": "lean4",
372
- "repo_link": "https://github.com/leanprover/lean4",
373
- "category": "theorem prover",
374
- "github_about_section": "Lean 4 programming language and theorem prover",
375
- "homepage_link": "https://lean-lang.org",
376
- "github_topic_closest_fit": "lean",
377
- "contributors_all": 278,
378
- "contributors_2025": 110,
379
- "contributors_2024": 85,
380
- "contributors_2023": 64
381
- },
382
  {
383
  "repo_name": "ComfyUI",
384
  "repo_link": "https://github.com/comfyanonymous/ComfyUI",
@@ -403,17 +380,6 @@
403
  "contributors_2024": 27,
404
  "contributors_2023": 3
405
  },
406
- {
407
- "repo_name": "burn",
408
- "repo_link": "https://github.com/tracel-ai/burn",
409
- "category": "multi-purpose library",
410
- "github_about_section": "Burn is a next generation tensor library and Deep Learning Framework that doesn't compromise on flexibility, efficiency and portability.",
411
- "homepage_link": "https://burn.dev",
412
- "contributors_all": 237,
413
- "contributors_2025": 99,
414
- "contributors_2024": 104,
415
- "contributors_2023": 62
416
- },
417
  {
418
  "repo_name": "accelerate",
419
  "repo_link": "https://github.com/huggingface/accelerate",
@@ -507,28 +473,6 @@
507
  "contributors_2024": 61,
508
  "contributors_2023": 0
509
  },
510
- {
511
- "repo_name": "nixl",
512
- "repo_link": "https://github.com/ai-dynamo/nixl",
513
- "category": "distributed computing",
514
- "github_about_section": "NVIDIA Inference Xfer Library (NIXL)",
515
- "contributors_all": 78,
516
- "contributors_2025": 78,
517
- "contributors_2024": 0,
518
- "contributors_2023": 0
519
- },
520
- {
521
- "repo_name": "jupyterlab",
522
- "repo_link": "https://github.com/jupyterlab/jupyterlab",
523
- "category": "user interface",
524
- "github_about_section": "JupyterLab computational environment.",
525
- "homepage_link": "https://jupyterlab.readthedocs.io",
526
- "github_topic_closest_fit": "jupyter",
527
- "contributors_all": 698,
528
- "contributors_2025": 77,
529
- "contributors_2024": 85,
530
- "contributors_2023": 100
531
- },
532
  {
533
  "repo_name": "hipBLASLt",
534
  "repo_link": "https://github.com/AMD-AGI/hipBLASLt",
@@ -653,30 +597,6 @@
653
  "contributors_2024": 30,
654
  "contributors_2023": 21
655
  },
656
- {
657
- "repo_name": "ondemand",
658
- "repo_link": "https://github.com/OSC/ondemand",
659
- "category": "hpc portal",
660
- "github_about_section": "Supercomputing. Seamlessly. Open, Interactive HPC Via the Web",
661
- "homepage_link": "https://openondemand.org",
662
- "github_topic_closest_fit": "hpc",
663
- "contributors_all": 117,
664
- "contributors_2025": 43,
665
- "contributors_2024": 23,
666
- "contributors_2023": 21
667
- },
668
- {
669
- "repo_name": "pybind11",
670
- "repo_link": "https://github.com/pybind/pybind11",
671
- "category": "middleware",
672
- "github_about_section": "Seamless operability between C++11 and Python",
673
- "homepage_link": "https://pybind11.readthedocs.io",
674
- "github_topic_closest_fit": "bindings",
675
- "contributors_all": 404,
676
- "contributors_2025": 43,
677
- "contributors_2024": 45,
678
- "contributors_2023": 42
679
- },
680
  {
681
  "repo_name": "cuda-python",
682
  "repo_link": "https://github.com/NVIDIA/cuda-python",
@@ -735,17 +655,6 @@
735
  "contributors_2024": 37,
736
  "contributors_2023": 9
737
  },
738
- {
739
- "repo_name": "AdaptiveCpp",
740
- "repo_link": "https://github.com/AdaptiveCpp/AdaptiveCpp",
741
- "category": "compiler",
742
- "github_about_section": "Compiler for multiple programming models (SYCL, C++ standard parallelism, HIP/CUDA) for CPUs and GPUs from all vendors: The independent, community-driven compiler for C++-based heterogeneous programming models. Lets applications adapt themselves to all the hardware in the system - even at runtime!",
743
- "homepage_link": "https://adaptivecpp.github.io",
744
- "contributors_all": 93,
745
- "contributors_2025": 32,
746
- "contributors_2024": 32,
747
- "contributors_2023": 24
748
- },
749
  {
750
  "repo_name": "Triton-distributed",
751
  "repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
@@ -793,18 +702,6 @@
793
  "contributors_2024": 0,
794
  "contributors_2023": 0
795
  },
796
- {
797
- "repo_name": "pocl",
798
- "repo_link": "https://github.com/pocl/pocl",
799
- "category": "parallel computing",
800
- "github_about_section": "pocl - Portable Computing Language",
801
- "homepage_link": "https://portablecl.org",
802
- "github_topic_closest_fit": "parallel-programming",
803
- "contributors_all": 166,
804
- "contributors_2025": 26,
805
- "contributors_2024": 27,
806
- "contributors_2023": 21
807
- },
808
  {
809
  "repo_name": "server",
810
  "repo_link": "https://github.com/triton-inference-server/server",
@@ -817,18 +714,6 @@
817
  "contributors_2024": 36,
818
  "contributors_2023": 34
819
  },
820
- {
821
- "repo_name": "Vulkan-Hpp",
822
- "repo_link": "https://github.com/KhronosGroup/Vulkan-Hpp",
823
- "category": "graphics api",
824
- "github_about_section": "Open-Source Vulkan C++ API",
825
- "homepage_link": "https://vulkan.org",
826
- "github_topic_closest_fit": "vulkan",
827
- "contributors_all": 102,
828
- "contributors_2025": 21,
829
- "contributors_2024": 15,
830
- "contributors_2023": 15
831
- },
832
  {
833
  "repo_name": "ccache",
834
  "repo_link": "https://github.com/ccache/ccache",
@@ -852,16 +737,6 @@
852
  "contributors_2024": 24,
853
  "contributors_2023": 42
854
  },
855
- {
856
- "repo_name": "tflite-micro",
857
- "repo_link": "https://github.com/tensorflow/tflite-micro",
858
- "category": "ML for Microcontrollers",
859
- "github_about_section": "Infrastructure to enable deployment of ML models to low-power resource-constrained embedded targets (including microcontrollers and digital signal processors).",
860
- "contributors_all": 111,
861
- "contributors_2025": 19,
862
- "contributors_2024": 25,
863
- "contributors_2023": 31
864
- },
865
  {
866
  "repo_name": "quack",
867
  "repo_link": "https://github.com/Dao-AILab/quack",
@@ -872,17 +747,6 @@
872
  "contributors_2024": 0,
873
  "contributors_2023": 0
874
  },
875
- {
876
- "repo_name": "oneDPL",
877
- "repo_link": "https://github.com/uxlfoundation/oneDPL",
878
- "category": "parallel computing",
879
- "github_about_section": "oneAPI DPC++ Library (oneDPL)",
880
- "homepage_link": "https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/dpc-library.html",
881
- "contributors_all": 67,
882
- "contributors_2025": 17,
883
- "contributors_2024": 29,
884
- "contributors_2023": 28
885
- },
886
  {
887
  "repo_name": "KernelBench",
888
  "repo_link": "https://github.com/ScalingIntelligence/KernelBench",
@@ -951,18 +815,6 @@
951
  "contributors_2024": 0,
952
  "contributors_2023": 0
953
  },
954
- {
955
- "repo_name": "SYCL-Docs",
956
- "repo_link": "https://github.com/KhronosGroup/SYCL-Docs",
957
- "category": "parallel computing",
958
- "github_about_section": "SYCL Open Source Specification",
959
- "homepage_link": "https://khronos.org/sycl",
960
- "github_topic_closest_fit": "parallel-programming",
961
- "contributors_all": 67,
962
- "contributors_2025": 13,
963
- "contributors_2024": 20,
964
- "contributors_2023": 27
965
- },
966
  {
967
  "repo_name": "Primus-Turbo",
968
  "repo_link": "https://github.com/AMD-AGI/Primus-Turbo",
@@ -1041,30 +893,6 @@
1041
  "contributors_2024": 0,
1042
  "contributors_2023": 0
1043
  },
1044
- {
1045
- "repo_name": "OpenCL-SDK",
1046
- "repo_link": "https://github.com/KhronosGroup/OpenCL-SDK",
1047
- "category": "parallel computing",
1048
- "github_about_section": "OpenCL SDK",
1049
- "homepage_link": "https://khronos.org/opencl",
1050
- "github_topic_closest_fit": "parallel-programming",
1051
- "contributors_all": 25,
1052
- "contributors_2025": 8,
1053
- "contributors_2024": 6,
1054
- "contributors_2023": 9
1055
- },
1056
- {
1057
- "repo_name": "ZLUDA",
1058
- "repo_link": "https://github.com/vosen/ZLUDA",
1059
- "category": "middleware",
1060
- "github_about_section": "CUDA on non-NVIDIA GPUs",
1061
- "homepage_link": "https://vosen.github.io/ZLUDA",
1062
- "github_topic_closest_fit": "parallel-programming",
1063
- "contributors_all": 15,
1064
- "contributors_2025": 8,
1065
- "contributors_2024": 4,
1066
- "contributors_2023": 0
1067
- },
1068
  {
1069
  "repo_name": "intelliperf",
1070
  "repo_link": "https://github.com/AMDResearch/intelliperf",
@@ -1077,17 +905,6 @@
1077
  "contributors_2024": 0,
1078
  "contributors_2023": 0
1079
  },
1080
- {
1081
- "repo_name": "nccl",
1082
- "repo_link": "https://github.com/NVIDIA/nccl",
1083
- "category": "distributed computing",
1084
- "github_about_section": "Optimized primitives for collective multi-GPU communication",
1085
- "homepage_link": "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html",
1086
- "contributors_all": 51,
1087
- "contributors_2025": 7,
1088
- "contributors_2024": 5,
1089
- "contributors_2023": 6
1090
- },
1091
  {
1092
  "repo_name": "cudnn-frontend",
1093
  "repo_link": "https://github.com/NVIDIA/cudnn-frontend",
@@ -1183,18 +1000,6 @@
1183
  "contributors_2024": 12,
1184
  "contributors_2023": 2
1185
  },
1186
- {
1187
- "repo_name": "cuJSON",
1188
- "repo_link": "https://github.com/AutomataLab/cuJSON",
1189
- "category": "library leveraging parallel compute",
1190
- "github_about_section": "cuJSON: A Highly Parallel JSON Parser for GPUs",
1191
- "homepage_link": "https://dl.acm.org/doi/10.1145/3760250.3762222",
1192
- "github_topic_closest_fit": "json-parser",
1193
- "contributors_all": 2,
1194
- "contributors_2025": 2,
1195
- "contributors_2024": 2,
1196
- "contributors_2023": 0
1197
- },
1198
  {
1199
  "repo_name": "IMO2025",
1200
  "repo_link": "https://github.com/harmonic-ai/IMO2025",
@@ -1240,29 +1045,6 @@
1240
  "contributors_2024": 0,
1241
  "contributors_2023": 0
1242
  },
1243
- {
1244
- "repo_name": "nvcc4jupyter",
1245
- "category": "middleware",
1246
- "repo_link": "https://github.com/andreinechaev/nvcc4jupyter",
1247
- "github_about_section": "A plugin for Jupyter Notebook to run CUDA C/C++ code",
1248
- "homepage_link": "https://nvcc4jupyter.readthedocs.io",
1249
- "contributors_all": 9,
1250
- "contributors_2025": 0,
1251
- "contributors_2024": 3,
1252
- "contributors_2023": 3
1253
- },
1254
- {
1255
- "repo_name": "CU2CL",
1256
- "repo_link": "https://github.com/vtsynergy/CU2CL",
1257
- "category": "source-to-source translator",
1258
- "github_about_section": "A prototype CUDA-to-OpenCL source-to-source translator, built on the Clang compiler framework",
1259
- "homepage_link": "http://chrec.cs.vt.edu/cu2cl",
1260
- "github_topic_closest_fit": "parallel-programming",
1261
- "contributors_all": 3,
1262
- "contributors_2025": 0,
1263
- "contributors_2024": 0,
1264
- "contributors_2023": 0
1265
- },
1266
  {
1267
  "repo_name": "triSYCL",
1268
  "repo_link": "https://github.com/triSYCL/triSYCL",
 
321
  "contributors_2024": 18,
322
  "contributors_2023": 0
323
  },
 
 
 
 
 
 
 
 
 
 
 
324
  {
325
  "repo_name": "Mooncake",
326
  "repo_link": "https://github.com/kvcache-ai/Mooncake",
 
356
  "contributors_2024": 100,
357
  "contributors_2023": 5
358
  },
 
 
 
 
 
 
 
 
 
 
 
 
359
  {
360
  "repo_name": "ComfyUI",
361
  "repo_link": "https://github.com/comfyanonymous/ComfyUI",
 
380
  "contributors_2024": 27,
381
  "contributors_2023": 3
382
  },
 
 
 
 
 
 
 
 
 
 
 
383
  {
384
  "repo_name": "accelerate",
385
  "repo_link": "https://github.com/huggingface/accelerate",
 
473
  "contributors_2024": 61,
474
  "contributors_2023": 0
475
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
476
  {
477
  "repo_name": "hipBLASLt",
478
  "repo_link": "https://github.com/AMD-AGI/hipBLASLt",
 
597
  "contributors_2024": 30,
598
  "contributors_2023": 21
599
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
600
  {
601
  "repo_name": "cuda-python",
602
  "repo_link": "https://github.com/NVIDIA/cuda-python",
 
655
  "contributors_2024": 37,
656
  "contributors_2023": 9
657
  },
 
 
 
 
 
 
 
 
 
 
 
658
  {
659
  "repo_name": "Triton-distributed",
660
  "repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
 
702
  "contributors_2024": 0,
703
  "contributors_2023": 0
704
  },
 
 
 
 
 
 
 
 
 
 
 
 
705
  {
706
  "repo_name": "server",
707
  "repo_link": "https://github.com/triton-inference-server/server",
 
714
  "contributors_2024": 36,
715
  "contributors_2023": 34
716
  },
 
 
 
 
 
 
 
 
 
 
 
 
717
  {
718
  "repo_name": "ccache",
719
  "repo_link": "https://github.com/ccache/ccache",
 
737
  "contributors_2024": 24,
738
  "contributors_2023": 42
739
  },
 
 
 
 
 
 
 
 
 
 
740
  {
741
  "repo_name": "quack",
742
  "repo_link": "https://github.com/Dao-AILab/quack",
 
747
  "contributors_2024": 0,
748
  "contributors_2023": 0
749
  },
 
 
 
 
 
 
 
 
 
 
 
750
  {
751
  "repo_name": "KernelBench",
752
  "repo_link": "https://github.com/ScalingIntelligence/KernelBench",
 
815
  "contributors_2024": 0,
816
  "contributors_2023": 0
817
  },
 
 
 
 
 
 
 
 
 
 
 
 
818
  {
819
  "repo_name": "Primus-Turbo",
820
  "repo_link": "https://github.com/AMD-AGI/Primus-Turbo",
 
893
  "contributors_2024": 0,
894
  "contributors_2023": 0
895
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
896
  {
897
  "repo_name": "intelliperf",
898
  "repo_link": "https://github.com/AMDResearch/intelliperf",
 
905
  "contributors_2024": 0,
906
  "contributors_2023": 0
907
  },
 
 
 
 
 
 
 
 
 
 
 
908
  {
909
  "repo_name": "cudnn-frontend",
910
  "repo_link": "https://github.com/NVIDIA/cudnn-frontend",
 
1000
  "contributors_2024": 12,
1001
  "contributors_2023": 2
1002
  },
 
 
 
 
 
 
 
 
 
 
 
 
1003
  {
1004
  "repo_name": "IMO2025",
1005
  "repo_link": "https://github.com/harmonic-ai/IMO2025",
 
1045
  "contributors_2024": 0,
1046
  "contributors_2023": 0
1047
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1048
  {
1049
  "repo_name": "triSYCL",
1050
  "repo_link": "https://github.com/triSYCL/triSYCL",
PyTorchConference2025_GithubRepos_Full.json ADDED
@@ -0,0 +1,1353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "repo_name": "llvm-project",
4
+ "repo_link": "https://github.com/llvm/llvm-project",
5
+ "category": "compiler",
6
+ "github_about_section": "The LLVM Project is a collection of modular and reusable compiler and toolchain technologies.",
7
+ "homepage_link": "http://llvm.org",
8
+ "github_topic_closest_fit": "compiler",
9
+ "contributors_all": 6680,
10
+ "contributors_2025": 2378,
11
+ "contributors_2024": 2130,
12
+ "contributors_2023": 1920
13
+ },
14
+ {
15
+ "repo_name": "vllm",
16
+ "repo_link": "https://github.com/vllm-project/vllm",
17
+ "category": "inference engine",
18
+ "github_about_section": "A high-throughput and memory-efficient inference and serving engine for LLMs",
19
+ "homepage_link": "https://docs.vllm.ai",
20
+ "github_topic_closest_fit": "inference",
21
+ "contributors_all": 1885,
22
+ "contributors_2025": 1369,
23
+ "contributors_2024": 579,
24
+ "contributors_2023": 145
25
+ },
26
+ {
27
+ "repo_name": "pytorch",
28
+ "repo_link": "https://github.com/pytorch/pytorch",
29
+ "category": "machine learning framework",
30
+ "github_about_section": "Tensors and Dynamic neural networks in Python with strong GPU acceleration",
31
+ "homepage_link": "https://pytorch.org",
32
+ "github_topic_closest_fit": "machine-learning",
33
+ "contributors_all": 5434,
34
+ "contributors_2025": 1187,
35
+ "contributors_2024": 1090,
36
+ "contributors_2023": 1024
37
+ },
38
+ {
39
+ "repo_name": "transformers",
40
+ "repo_link": "https://github.com/huggingface/transformers",
41
+ "category": "multi-purpose library",
42
+ "github_about_section": "Transformers: the model-definition framework for state-of-the-art machine learning models in text, vision, audio, and multimodal models, for both inference and training.",
43
+ "homepage_link": "https://huggingface.co/transformers",
44
+ "github_topic_closest_fit": "machine-learning",
45
+ "contributors_all": 3582,
46
+ "contributors_2025": 860,
47
+ "contributors_2024": 769,
48
+ "contributors_2023": 758
49
+ },
50
+ {
51
+ "repo_name": "sglang",
52
+ "repo_link": "https://github.com/sgl-project/sglang",
53
+ "category": "inference engine",
54
+ "github_about_section": "SGLang is a fast serving framework for large language models and vision language models.",
55
+ "homepage_link": "https://docs.sglang.ai",
56
+ "github_topic_closest_fit": "inference",
57
+ "contributors_all": 937,
58
+ "contributors_2025": 796,
59
+ "contributors_2024": 189,
60
+ "contributors_2023": 1
61
+ },
62
+ {
63
+ "repo_name": "hhvm",
64
+ "repo_link": "https://github.com/facebook/hhvm",
65
+ "category": "virtual machine",
66
+ "github_about_section": "A virtual machine for executing programs written in Hack.",
67
+ "homepage_link": "https://hhvm.com",
68
+ "github_topic_closest_fit": "virtual-machine",
69
+ "contributors_all": 2624,
70
+ "contributors_2025": 692,
71
+ "contributors_2024": 648,
72
+ "contributors_2023": 604
73
+ },
74
+ {
75
+ "repo_name": "llama.cpp",
76
+ "repo_link": "https://github.com/ggml-org/llama.cpp",
77
+ "category": "inference engine",
78
+ "github_about_section": "LLM inference in C/C++",
79
+ "homepage_link": "https://ggml.ai",
80
+ "github_topic_closest_fit": "inference",
81
+ "contributors_all": 1374,
82
+ "contributors_2025": 535,
83
+ "contributors_2024": 575,
84
+ "contributors_2023": 461
85
+ },
86
+ {
87
+ "repo_name": "kubernetes",
88
+ "repo_link": "https://github.com/kubernetes/kubernetes",
89
+ "category": "container orchestration",
90
+ "github_about_section": "Production-Grade Container Scheduling and Management",
91
+ "homepage_link": "https://kubernetes.io",
92
+ "github_topic_closest_fit": "kubernetes",
93
+ "contributors_all": 5041,
94
+ "contributors_2025": 509,
95
+ "contributors_2024": 498,
96
+ "contributors_2023": 565
97
+ },
98
+ {
99
+ "repo_name": "tensorflow",
100
+ "repo_link": "https://github.com/tensorflow/tensorflow",
101
+ "category": "machine learning framework",
102
+ "github_about_section": "An Open Source Machine Learning Framework for Everyone",
103
+ "homepage_link": "https://tensorflow.org",
104
+ "github_topic_closest_fit": "machine-learning",
105
+ "contributors_all": 4618,
106
+ "contributors_2025": 500,
107
+ "contributors_2024": 523,
108
+ "contributors_2023": 630
109
+ },
110
+ {
111
+ "repo_name": "verl",
112
+ "repo_link": "https://github.com/volcengine/verl",
113
+ "category": "reinforcement learning",
114
+ "github_about_section": "verl: Volcano Engine Reinforcement Learning for LLMs",
115
+ "homepage_link": "https://verl.readthedocs.io",
116
+ "github_topic_closest_fit": "deep-reinforcement-learning",
117
+ "contributors_all": 462,
118
+ "contributors_2025": 454,
119
+ "contributors_2024": 10,
120
+ "contributors_2023": 0
121
+ },
122
+ {
123
+ "repo_name": "rocm-systems",
124
+ "repo_link": "https://github.com/ROCm/rocm-systems",
125
+ "category": "multi-purpose library",
126
+ "github_about_section": "super repo for rocm systems projects",
127
+ "homepage_link": "https://amd.com/en/products/software/rocm.html",
128
+ "github_topic_closest_fit": "amd",
129
+ "contributors_all": 1032,
130
+ "contributors_2025": 440,
131
+ "contributors_2024": 323,
132
+ "contributors_2023": 204
133
+ },
134
+ {
135
+ "repo_name": "ray",
136
+ "repo_link": "https://github.com/ray-project/ray",
137
+ "category": "multi-purpose library",
138
+ "github_about_section": "Ray is an AI compute engine. Ray consists of a core distributed runtime and a set of AI Libraries for accelerating ML workloads.",
139
+ "homepage_link": "https://ray.io",
140
+ "github_topic_closest_fit": "machine-learning",
141
+ "contributors_all": 1381,
142
+ "contributors_2025": 397,
143
+ "contributors_2024": 223,
144
+ "contributors_2023": 230
145
+ },
146
+ {
147
+ "repo_name": "spark",
148
+ "repo_link": "https://github.com/apache/spark",
149
+ "category": "data processing",
150
+ "github_about_section": "Apache Spark - A unified analytics engine for large-scale data processing",
151
+ "homepage_link": "https://spark.apache.org",
152
+ "github_topic_closest_fit": "data-processing",
153
+ "contributors_all": 3083,
154
+ "contributors_2025": 322,
155
+ "contributors_2024": 300,
156
+ "contributors_2023": 336
157
+ },
158
+ {
159
+ "repo_name": "goose",
160
+ "repo_link": "https://github.com/block/goose",
161
+ "category": "agent",
162
+ "github_about_section": "an open source, extensible AI agent that goes beyond code suggestions - install, execute, edit, and test with any LLM",
163
+ "homepage_link": "https://block.github.io/goose",
164
+ "github_topic_closest_fit": "ai-agents",
165
+ "contributors_all": 332,
166
+ "contributors_2025": 319,
167
+ "contributors_2024": 32,
168
+ "contributors_2023": 0
169
+ },
170
+ {
171
+ "repo_name": "elasticsearch",
172
+ "repo_link": "https://github.com/elastic/elasticsearch",
173
+ "category": "search engine",
174
+ "github_about_section": "Free and Open Source, Distributed, RESTful Search Engine",
175
+ "homepage_link": "https://elastic.co/products/elasticsearch",
176
+ "github_topic_closest_fit": "search-engine",
177
+ "contributors_all": 2297,
178
+ "contributors_2025": 316,
179
+ "contributors_2024": 284,
180
+ "contributors_2023": 270
181
+ },
182
+ {
183
+ "repo_name": "jax",
184
+ "repo_link": "https://github.com/jax-ml/jax",
185
+ "category": "scientific computing",
186
+ "github_about_section": "Composable transformations of Python+NumPy programs: differentiate, vectorize, JIT to GPU/TPU, and more",
187
+ "homepage_link": "https://docs.jax.dev",
188
+ "github_topic_closest_fit": "scientific-computing",
189
+ "contributors_all": 997,
190
+ "contributors_2025": 312,
191
+ "contributors_2024": 280,
192
+ "contributors_2023": 202
193
+ },
194
+ {
195
+ "repo_name": "modelcontextprotocol",
196
+ "repo_link": "https://github.com/modelcontextprotocol/modelcontextprotocol",
197
+ "category": "mcp",
198
+ "github_about_section": "Specification and documentation for the Model Context Protocol",
199
+ "homepage_link": "https://modelcontextprotocol.io",
200
+ "github_topic_closest_fit": "mcp",
201
+ "contributors_all": 327,
202
+ "contributors_2025": 298,
203
+ "contributors_2024": 42,
204
+ "contributors_2023": 0
205
+ },
206
+ {
207
+ "repo_name": "executorch",
208
+ "repo_link": "https://github.com/pytorch/executorch",
209
+ "category": "model compiler",
210
+ "github_about_section": "On-device AI across mobile, embedded and edge for PyTorch",
211
+ "homepage_link": "https://executorch.ai",
212
+ "github_topic_closest_fit": "inference",
213
+ "contributors_all": 437,
214
+ "contributors_2025": 267,
215
+ "contributors_2024": 243,
216
+ "contributors_2023": 77
217
+ },
218
+ {
219
+ "repo_name": "numpy",
220
+ "repo_link": "https://github.com/numpy/numpy",
221
+ "category": "scientific computing",
222
+ "github_about_section": "The fundamental package for scientific computing with Python.",
223
+ "homepage_link": "https://numpy.org",
224
+ "github_topic_closest_fit": "scientific-computing",
225
+ "contributors_all": 2172,
226
+ "contributors_2025": 235,
227
+ "contributors_2024": 233,
228
+ "contributors_2023": 252
229
+ },
230
+ {
231
+ "repo_name": "triton",
232
+ "repo_link": "https://github.com/triton-lang/triton",
233
+ "category": "parallel computing dsl",
234
+ "github_about_section": "Development repository for the Triton language and compiler",
235
+ "homepage_link": "https://triton-lang.org",
236
+ "github_topic_closest_fit": "parallel-programming",
237
+ "contributors_all": 522,
238
+ "contributors_2025": 233,
239
+ "contributors_2024": 206,
240
+ "contributors_2023": 159
241
+ },
242
+ {
243
+ "repo_name": "modular",
244
+ "repo_link": "https://github.com/modular/modular",
245
+ "category": "parallel computing",
246
+ "github_about_section": "The Modular Platform (includes MAX & Mojo)",
247
+ "homepage_link": "https://docs.modular.com",
248
+ "github_topic_closest_fit": "parallel-programming",
249
+ "contributors_all": 366,
250
+ "contributors_2025": 222,
251
+ "contributors_2024": 205,
252
+ "contributors_2023": 99
253
+ },
254
+ {
255
+ "repo_name": "scipy",
256
+ "repo_link": "https://github.com/scipy/scipy",
257
+ "category": "scientific computing",
258
+ "github_about_section": "SciPy library main repository",
259
+ "homepage_link": "https://scipy.org",
260
+ "github_topic_closest_fit": "scientific-computing",
261
+ "contributors_all": 1973,
262
+ "contributors_2025": 210,
263
+ "contributors_2024": 251,
264
+ "contributors_2023": 245
265
+ },
266
+ {
267
+ "repo_name": "ollama",
268
+ "repo_link": "https://github.com/ollama/ollama",
269
+ "category": "inference engine",
270
+ "github_about_section": "Get up and running with OpenAI gpt-oss, DeepSeek-R1, Gemma 3 and other models.",
271
+ "homepage_link": "https://ollama.com",
272
+ "github_topic_closest_fit": "inference",
273
+ "contributors_all": 574,
274
+ "contributors_2025": 202,
275
+ "contributors_2024": 314,
276
+ "contributors_2023": 97
277
+ },
278
+ {
279
+ "repo_name": "trl",
280
+ "repo_link": "https://github.com/huggingface/trl",
281
+ "category": "reinforcement learning",
282
+ "github_about_section": "Train transformer language models with reinforcement learning.",
283
+ "homepage_link": "http://hf.co/docs/trl",
284
+ "github_topic_closest_fit": "reinforcement-learning",
285
+ "contributors_all": 433,
286
+ "contributors_2025": 189,
287
+ "contributors_2024": 154,
288
+ "contributors_2023": 122
289
+ },
290
+ {
291
+ "repo_name": "flashinfer",
292
+ "repo_link": "https://github.com/flashinfer-ai/flashinfer",
293
+ "category": "gpu kernels",
294
+ "github_about_section": "FlashInfer: Kernel Library for LLM Serving",
295
+ "homepage_link": "https://flashinfer.ai",
296
+ "github_topic_closest_fit": "attention",
297
+ "contributors_all": 205,
298
+ "contributors_2025": 158,
299
+ "contributors_2024": 50,
300
+ "contributors_2023": 11
301
+ },
302
+ {
303
+ "repo_name": "aiter",
304
+ "repo_link": "https://github.com/ROCm/aiter",
305
+ "category": "gpu kernels",
306
+ "github_about_section": "AI Tensor Engine for ROCm",
307
+ "homepage_link": "https://rocm.blogs.amd.com/software-tools-optimization/aiter-ai-tensor-engine/README.html",
308
+ "contributors_all": 151,
309
+ "contributors_2025": 145,
310
+ "contributors_2024": 10,
311
+ "contributors_2023": 0
312
+ },
313
+ {
314
+ "repo_name": "LMCache",
315
+ "repo_link": "https://github.com/LMCache/LMCache",
316
+ "category": "inference",
317
+ "github_about_section": "Supercharge Your LLM with the Fastest KV Cache Layer",
318
+ "homepage_link": "https://lmcache.ai",
319
+ "contributors_all": 152,
320
+ "contributors_2025": 144,
321
+ "contributors_2024": 18,
322
+ "contributors_2023": 0
323
+ },
324
+ {
325
+ "repo_name": "composable_kernel",
326
+ "repo_link": "https://github.com/ROCm/composable_kernel",
327
+ "category": "gpu kernels",
328
+ "github_about_section": "Composable Kernel: Performance Portable Programming Model for Machine Learning Tensor Operators",
329
+ "homepage_link": "https://rocm.docs.amd.com/projects/composable_kernel",
330
+ "contributors_all": 190,
331
+ "contributors_2025": 140,
332
+ "contributors_2024": 58,
333
+ "contributors_2023": 33
334
+ },
335
+ {
336
+ "repo_name": "Mooncake",
337
+ "repo_link": "https://github.com/kvcache-ai/Mooncake",
338
+ "category": "inference",
339
+ "github_about_section": "Mooncake is the serving platform for Kimi, a leading LLM service provided by Moonshot AI.",
340
+ "homepage_link": "https://kvcache-ai.github.io/Mooncake",
341
+ "github_topic_closest_fit": "inference",
342
+ "contributors_all": 138,
343
+ "contributors_2025": 133,
344
+ "contributors_2024": 13,
345
+ "contributors_2023": 0
346
+ },
347
+ {
348
+ "repo_name": "torchtitan",
349
+ "repo_link": "https://github.com/pytorch/torchtitan",
350
+ "category": "training framework",
351
+ "github_about_section": "A PyTorch native platform for training generative AI models",
352
+ "homepage_link": "https://arxiv.org/abs/2410.06511",
353
+ "contributors_all": 145,
354
+ "contributors_2025": 119,
355
+ "contributors_2024": 43,
356
+ "contributors_2023": 1
357
+ },
358
+ {
359
+ "repo_name": "ao",
360
+ "repo_link": "https://github.com/pytorch/ao",
361
+ "category": "quantization",
362
+ "github_about_section": "PyTorch native quantization and sparsity for training and inference",
363
+ "homepage_link": "https://pytorch.org/ao",
364
+ "github_topic_closest_fit": "quantization",
365
+ "contributors_all": 178,
366
+ "contributors_2025": 114,
367
+ "contributors_2024": 100,
368
+ "contributors_2023": 5
369
+ },
370
+ {
371
+ "repo_name": "lean4",
372
+ "repo_link": "https://github.com/leanprover/lean4",
373
+ "category": "theorem prover",
374
+ "github_about_section": "Lean 4 programming language and theorem prover",
375
+ "homepage_link": "https://lean-lang.org",
376
+ "github_topic_closest_fit": "lean",
377
+ "contributors_all": 278,
378
+ "contributors_2025": 110,
379
+ "contributors_2024": 85,
380
+ "contributors_2023": 64
381
+ },
382
+ {
383
+ "repo_name": "ComfyUI",
384
+ "repo_link": "https://github.com/comfyanonymous/ComfyUI",
385
+ "category": "user interface",
386
+ "github_about_section": "The most powerful and modular diffusion model GUI, api and backend with a graph/nodes interface.",
387
+ "homepage_link": "https://comfy.org",
388
+ "github_topic_closest_fit": "stable-diffusion",
389
+ "contributors_all": 278,
390
+ "contributors_2025": 108,
391
+ "contributors_2024": 119,
392
+ "contributors_2023": 94
393
+ },
394
+ {
395
+ "repo_name": "unsloth",
396
+ "repo_link": "https://github.com/unslothai/unsloth",
397
+ "category": "fine tuning",
398
+ "github_about_section": "Fine-tuning & Reinforcement Learning for LLMs. Train OpenAI gpt-oss, DeepSeek-R1, Qwen3, Gemma 3, TTS 2x faster with 70% less VRAM.",
399
+ "homepage_link": "https://docs.unsloth.ai",
400
+ "github_topic_closest_fit": "fine-tuning",
401
+ "contributors_all": 127,
402
+ "contributors_2025": 102,
403
+ "contributors_2024": 27,
404
+ "contributors_2023": 3
405
+ },
406
+ {
407
+ "repo_name": "burn",
408
+ "repo_link": "https://github.com/tracel-ai/burn",
409
+ "category": "multi-purpose library",
410
+ "github_about_section": "Burn is a next generation tensor library and Deep Learning Framework that doesn't compromise on flexibility, efficiency and portability.",
411
+ "homepage_link": "https://burn.dev",
412
+ "contributors_all": 237,
413
+ "contributors_2025": 99,
414
+ "contributors_2024": 104,
415
+ "contributors_2023": 62
416
+ },
417
+ {
418
+ "repo_name": "accelerate",
419
+ "repo_link": "https://github.com/huggingface/accelerate",
420
+ "category": "training framework",
421
+ "github_about_section": "A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support.",
422
+ "homepage_link": "https://huggingface.co/docs/accelerate",
423
+ "contributors_all": 392,
424
+ "contributors_2025": 97,
425
+ "contributors_2024": 124,
426
+ "contributors_2023": 149
427
+ },
428
+ {
429
+ "repo_name": "terminal-bench",
430
+ "repo_link": "https://github.com/laude-institute/terminal-bench",
431
+ "category": "benchmark",
432
+ "github_about_section": "A benchmark for LLMs on complicated tasks in the terminal",
433
+ "homepage_link": "https://tbench.ai",
434
+ "github_topic_closest_fit": "benchmark",
435
+ "contributors_all": 96,
436
+ "contributors_2025": 96,
437
+ "contributors_2024": 0,
438
+ "contributors_2023": 0
439
+ },
440
+ {
441
+ "repo_name": "DeepSpeed",
442
+ "repo_link": "https://github.com/deepspeedai/DeepSpeed",
443
+ "category": "training framework",
444
+ "github_about_section": "DeepSpeed is a deep learning optimization library that makes distributed training and inference easy, efficient, and effective.",
445
+ "homepage_link": "https://deepspeed.ai",
446
+ "contributors_all": 442,
447
+ "contributors_2025": 96,
448
+ "contributors_2024": 134,
449
+ "contributors_2023": 165
450
+ },
451
+ {
452
+ "repo_name": "milvus",
453
+ "repo_link": "https://github.com/milvus-io/milvus",
454
+ "category": "vector database",
455
+ "github_about_section": "Milvus is a high-performance, cloud-native vector database built for scalable vector ANN search",
456
+ "homepage_link": "https://milvus.io",
457
+ "github_topic_closest_fit": "vector-search",
458
+ "contributors_all": 387,
459
+ "contributors_2025": 95,
460
+ "contributors_2024": 84,
461
+ "contributors_2023": 72
462
+ },
463
+ {
464
+ "repo_name": "cutlass",
465
+ "repo_link": "https://github.com/NVIDIA/cutlass",
466
+ "category": "parallel computing",
467
+ "github_about_section": "CUDA Templates and Python DSLs for High-Performance Linear Algebra",
468
+ "homepage_link": "https://docs.nvidia.com/cutlass/index.html",
469
+ "github_topic_closest_fit": "parallel-programming",
470
+ "contributors_all": 238,
471
+ "contributors_2025": 94,
472
+ "contributors_2024": 64,
473
+ "contributors_2023": 66
474
+ },
475
+ {
476
+ "repo_name": "tilelang",
477
+ "repo_link": "https://github.com/tile-ai/tilelang",
478
+ "category": "parallel computing dsl",
479
+ "github_about_section": "Domain-specific language designed to streamline the development of high-performance GPU/CPU/Accelerators kernels",
480
+ "homepage_link": "https://tilelang.com",
481
+ "github_topic_closest_fit": "parallel-programming",
482
+ "contributors_all": 90,
483
+ "contributors_2025": 89,
484
+ "contributors_2024": 1,
485
+ "contributors_2023": 0
486
+ },
487
+ {
488
+ "repo_name": "monarch",
489
+ "repo_link": "https://github.com/meta-pytorch/monarch",
490
+ "category": "distributed computing",
491
+ "github_about_section": "PyTorch Single Controller",
492
+ "homepage_link": "https://meta-pytorch.org/monarch",
493
+ "contributors_all": 85,
494
+ "contributors_2025": 85,
495
+ "contributors_2024": 0,
496
+ "contributors_2023": 0
497
+ },
498
+ {
499
+ "repo_name": "Liger-Kernel",
500
+ "repo_link": "https://github.com/linkedin/Liger-Kernel",
501
+ "category": "kernel examples",
502
+ "github_about_section": "Efficient Triton Kernels for LLM Training",
503
+ "homepage_link": "https://openreview.net/pdf?id=36SjAIT42G",
504
+ "github_topic_closest_fit": "triton",
505
+ "contributors_all": 120,
506
+ "contributors_2025": 78,
507
+ "contributors_2024": 61,
508
+ "contributors_2023": 0
509
+ },
510
+ {
511
+ "repo_name": "nixl",
512
+ "repo_link": "https://github.com/ai-dynamo/nixl",
513
+ "category": "distributed computing",
514
+ "github_about_section": "NVIDIA Inference Xfer Library (NIXL)",
515
+ "contributors_all": 78,
516
+ "contributors_2025": 78,
517
+ "contributors_2024": 0,
518
+ "contributors_2023": 0
519
+ },
520
+ {
521
+ "repo_name": "jupyterlab",
522
+ "repo_link": "https://github.com/jupyterlab/jupyterlab",
523
+ "category": "user interface",
524
+ "github_about_section": "JupyterLab computational environment.",
525
+ "homepage_link": "https://jupyterlab.readthedocs.io",
526
+ "github_topic_closest_fit": "jupyter",
527
+ "contributors_all": 698,
528
+ "contributors_2025": 77,
529
+ "contributors_2024": 85,
530
+ "contributors_2023": 100
531
+ },
532
+ {
533
+ "repo_name": "hipBLASLt",
534
+ "repo_link": "https://github.com/AMD-AGI/hipBLASLt",
535
+ "category": "Basic Linear Algebra Subprograms (BLAS)",
536
+ "github_about_section": "hipBLASLt is a library that provides general matrix-matrix operations with a flexible API and extends functionalities beyond a traditional BLAS library",
537
+ "homepage_link": "https://rocm.docs.amd.com/projects/hipBLASLt",
538
+ "github_topic_closest_fit": "matrix-multiplication",
539
+ "contributors_all": 111,
540
+ "contributors_2025": 69,
541
+ "contributors_2024": 70,
542
+ "contributors_2023": 35
543
+ },
544
+ {
545
+ "repo_name": "peft",
546
+ "repo_link": "https://github.com/huggingface/peft",
547
+ "category": "fine tuning",
548
+ "github_about_section": "PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.",
549
+ "homepage_link": "https://huggingface.co/docs/peft",
550
+ "contributors_all": 272,
551
+ "contributors_2025": 69,
552
+ "contributors_2024": 111,
553
+ "contributors_2023": 115
554
+ },
555
+ {
556
+ "repo_name": "ROCm",
557
+ "repo_link": "https://github.com/ROCm/ROCm",
558
+ "category": "multi-purpose library",
559
+ "github_about_section": "AMD ROCm Software - GitHub Home",
560
+ "homepage_link": "https://rocm.docs.amd.com",
561
+ "contributors_all": 166,
562
+ "contributors_2025": 67,
563
+ "contributors_2024": 61,
564
+ "contributors_2023": 44
565
+ },
566
+ {
567
+ "repo_name": "mcp-agent",
568
+ "repo_link": "https://github.com/lastmile-ai/mcp-agent",
569
+ "category": "mcp",
570
+ "github_about_section": "Build effective agents using Model Context Protocol and simple workflow patterns",
571
+ "github_topic_closest_fit": "mcp",
572
+ "contributors_all": 63,
573
+ "contributors_2025": 63,
574
+ "contributors_2024": 1,
575
+ "contributors_2023": 0
576
+ },
577
+ {
578
+ "repo_name": "rdma-core",
579
+ "repo_link": "https://github.com/linux-rdma/rdma-core",
580
+ "category": "systems level code",
581
+ "github_about_section": "RDMA core userspace libraries and daemons",
582
+ "contributors_all": 437,
583
+ "contributors_2025": 58,
584
+ "contributors_2024": 61,
585
+ "contributors_2023": 66
586
+ },
587
+ {
588
+ "repo_name": "onnx",
589
+ "repo_link": "https://github.com/onnx/onnx",
590
+ "category": "machine learning interoperability",
591
+ "github_about_section": "Open standard for machine learning interoperability",
592
+ "homepage_link": "https://onnx.ai",
593
+ "github_topic_closest_fit": "onnx",
594
+ "contributors_all": 370,
595
+ "contributors_2025": 56,
596
+ "contributors_2024": 45,
597
+ "contributors_2023": 61
598
+ },
599
+ {
600
+ "repo_name": "letta",
601
+ "repo_link": "https://github.com/letta-ai/letta",
602
+ "category": "agent",
603
+ "github_about_section": "Letta is the platform for building stateful agents: open AI with advanced memory that can learn and self-improve over time.",
604
+ "homepage_link": "https://docs.letta.com",
605
+ "github_topic_closest_fit": "ai-agents",
606
+ "contributors_all": 157,
607
+ "contributors_2025": 56,
608
+ "contributors_2024": 75,
609
+ "contributors_2023": 47
610
+ },
611
+ {
612
+ "repo_name": "helion",
613
+ "repo_link": "https://github.com/pytorch/helion",
614
+ "category": "parallel computing dsl",
615
+ "github_about_section": "A Python-embedded DSL that makes it easy to write fast, scalable ML kernels with minimal boilerplate.",
616
+ "homepage_link": "https://helionlang.com",
617
+ "github_topic_closest_fit": "parallel-programming",
618
+ "contributors_all": 49,
619
+ "contributors_2025": 49,
620
+ "contributors_2024": 0,
621
+ "contributors_2023": 0
622
+ },
623
+ {
624
+ "repo_name": "openevolve",
625
+ "repo_link": "https://github.com/codelion/openevolve",
626
+ "category": "evolutionary algorithm",
627
+ "github_about_section": "Open-source implementation of AlphaEvolve",
628
+ "github_topic_closest_fit": "genetic-algorithm",
629
+ "contributors_all": 46,
630
+ "contributors_2025": 46,
631
+ "contributors_2024": 0,
632
+ "contributors_2023": 0
633
+ },
634
+ {
635
+ "repo_name": "lightning-thunder",
636
+ "repo_link": "https://github.com/Lightning-AI/lightning-thunder",
637
+ "category": "model compiler",
638
+ "github_about_section": "PyTorch compiler that accelerates training and inference. Get built-in optimizations for performance, memory, parallelism, and easily write your own.",
639
+ "contributors_all": 76,
640
+ "contributors_2025": 44,
641
+ "contributors_2024": 47,
642
+ "contributors_2023": 29
643
+ },
644
+ {
645
+ "repo_name": "truss",
646
+ "repo_link": "https://github.com/basetenlabs/truss",
647
+ "category": "inference engine",
648
+ "github_about_section": "The simplest way to serve AI/ML models in production",
649
+ "homepage_link": "https://truss.baseten.co",
650
+ "github_topic_closest_fit": "inference",
651
+ "contributors_all": 72,
652
+ "contributors_2025": 44,
653
+ "contributors_2024": 30,
654
+ "contributors_2023": 21
655
+ },
656
+ {
657
+ "repo_name": "ondemand",
658
+ "repo_link": "https://github.com/OSC/ondemand",
659
+ "category": "hpc portal",
660
+ "github_about_section": "Supercomputing. Seamlessly. Open, Interactive HPC Via the Web",
661
+ "homepage_link": "https://openondemand.org",
662
+ "github_topic_closest_fit": "hpc",
663
+ "contributors_all": 117,
664
+ "contributors_2025": 43,
665
+ "contributors_2024": 23,
666
+ "contributors_2023": 21
667
+ },
668
+ {
669
+ "repo_name": "pybind11",
670
+ "repo_link": "https://github.com/pybind/pybind11",
671
+ "category": "middleware",
672
+ "github_about_section": "Seamless operability between C++11 and Python",
673
+ "homepage_link": "https://pybind11.readthedocs.io",
674
+ "github_topic_closest_fit": "bindings",
675
+ "contributors_all": 404,
676
+ "contributors_2025": 43,
677
+ "contributors_2024": 45,
678
+ "contributors_2023": 42
679
+ },
680
+ {
681
+ "repo_name": "cuda-python",
682
+ "repo_link": "https://github.com/NVIDIA/cuda-python",
683
+ "category": "middleware",
684
+ "github_about_section": "CUDA Python: Performance meets Productivity",
685
+ "homepage_link": "https://nvidia.github.io/cuda-python",
686
+ "github_topic_closest_fit": "parallel-programming",
687
+ "contributors_all": 48,
688
+ "contributors_2025": 41,
689
+ "contributors_2024": 12,
690
+ "contributors_2023": 1
691
+ },
692
+ {
693
+ "repo_name": "warp",
694
+ "repo_link": "https://github.com/NVIDIA/warp",
695
+ "category": "spatial computing",
696
+ "github_about_section": "A Python framework for accelerated simulation, data generation and spatial computing.",
697
+ "homepage_link": "https://nvidia.github.io/warp",
698
+ "github_topic_closest_fit": "physics-simulation",
699
+ "contributors_all": 79,
700
+ "contributors_2025": 40,
701
+ "contributors_2024": 29,
702
+ "contributors_2023": 17
703
+ },
704
+ {
705
+ "repo_name": "metaflow",
706
+ "repo_link": "https://github.com/Netflix/metaflow",
707
+ "category": "container orchestration",
708
+ "github_about_section": "Build, Manage and Deploy AI/ML Systems",
709
+ "homepage_link": "https://metaflow.org",
710
+ "contributors_all": 121,
711
+ "contributors_2025": 37,
712
+ "contributors_2024": 35,
713
+ "contributors_2023": 28
714
+ },
715
+ {
716
+ "repo_name": "numba",
717
+ "repo_link": "https://github.com/numba/numba",
718
+ "category": "compiler",
719
+ "github_about_section": "NumPy aware dynamic Python compiler using LLVM",
720
+ "homepage_link": "https://numba.pydata.org",
721
+ "contributors_all": 430,
722
+ "contributors_2025": 36,
723
+ "contributors_2024": 32,
724
+ "contributors_2023": 55
725
+ },
726
+ {
727
+ "repo_name": "SWE-bench",
728
+ "repo_link": "https://github.com/SWE-bench/SWE-bench",
729
+ "category": "benchmark",
730
+ "github_about_section": "SWE-bench: Can Language Models Resolve Real-world Github Issues?",
731
+ "homepage_link": "https://swebench.com",
732
+ "github_topic_closest_fit": "benchmark",
733
+ "contributors_all": 66,
734
+ "contributors_2025": 33,
735
+ "contributors_2024": 37,
736
+ "contributors_2023": 9
737
+ },
738
+ {
739
+ "repo_name": "AdaptiveCpp",
740
+ "repo_link": "https://github.com/AdaptiveCpp/AdaptiveCpp",
741
+ "category": "compiler",
742
+ "github_about_section": "Compiler for multiple programming models (SYCL, C++ standard parallelism, HIP/CUDA) for CPUs and GPUs from all vendors: The independent, community-driven compiler for C++-based heterogeneous programming models. Lets applications adapt themselves to all the hardware in the system - even at runtime!",
743
+ "homepage_link": "https://adaptivecpp.github.io",
744
+ "contributors_all": 93,
745
+ "contributors_2025": 32,
746
+ "contributors_2024": 32,
747
+ "contributors_2023": 24
748
+ },
749
+ {
750
+ "repo_name": "Triton-distributed",
751
+ "repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
752
+ "category": "distributed computing",
753
+ "github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
754
+ "homepage_link": "https://triton-distributed.readthedocs.io",
755
+ "contributors_all": 30,
756
+ "contributors_2025": 30,
757
+ "contributors_2024": 0,
758
+ "contributors_2023": 0
759
+ },
760
+ {
761
+ "repo_name": "ThunderKittens",
762
+ "repo_link": "https://github.com/HazyResearch/ThunderKittens",
763
+ "category": "parallel computing",
764
+ "github_about_section": "Tile primitives for speedy kernels",
765
+ "homepage_link": "https://hazyresearch.stanford.edu/blog/2024-10-29-tk2",
766
+ "github_topic_closest_fit": "parallel-programming",
767
+ "contributors_all": 34,
768
+ "contributors_2025": 29,
769
+ "contributors_2024": 13,
770
+ "contributors_2023": 0
771
+ },
772
+ {
773
+ "repo_name": "dstack",
774
+ "repo_link": "https://github.com/dstackai/dstack",
775
+ "category": "container orchestration",
776
+ "github_about_section": "dstack is an open-source control plane for running development, training, and inference jobs on GPUs-across hyperscalers, neoclouds, or on-prem.",
777
+ "homepage_link": "https://dstack.ai",
778
+ "github_topic_closest_fit": "orchestration",
779
+ "contributors_all": 69,
780
+ "contributors_2025": 28,
781
+ "contributors_2024": 42,
782
+ "contributors_2023": 14
783
+ },
784
+ {
785
+ "repo_name": "ome",
786
+ "repo_link": "https://github.com/sgl-project/ome",
787
+ "category": "container orchestration",
788
+ "github_about_section": "OME is a Kubernetes operator for enterprise-grade management and serving of Large Language Models (LLMs)",
789
+ "homepage_link": "http://docs.sglang.ai/ome",
790
+ "github_topic_closest_fit": "k8s",
791
+ "contributors_all": 28,
792
+ "contributors_2025": 28,
793
+ "contributors_2024": 0,
794
+ "contributors_2023": 0
795
+ },
796
+ {
797
+ "repo_name": "pocl",
798
+ "repo_link": "https://github.com/pocl/pocl",
799
+ "category": "parallel computing",
800
+ "github_about_section": "pocl - Portable Computing Language",
801
+ "homepage_link": "https://portablecl.org",
802
+ "github_topic_closest_fit": "parallel-programming",
803
+ "contributors_all": 166,
804
+ "contributors_2025": 26,
805
+ "contributors_2024": 27,
806
+ "contributors_2023": 21
807
+ },
808
+ {
809
+ "repo_name": "server",
810
+ "repo_link": "https://github.com/triton-inference-server/server",
811
+ "category": "inference server",
812
+ "github_about_section": "The Triton Inference Server provides an optimized cloud and edge inferencing solution.",
813
+ "homepage_link": "https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html",
814
+ "github_topic_closest_fit": "inference",
815
+ "contributors_all": 147,
816
+ "contributors_2025": 24,
817
+ "contributors_2024": 36,
818
+ "contributors_2023": 34
819
+ },
820
+ {
821
+ "repo_name": "Vulkan-Hpp",
822
+ "repo_link": "https://github.com/KhronosGroup/Vulkan-Hpp",
823
+ "category": "graphics api",
824
+ "github_about_section": "Open-Source Vulkan C++ API",
825
+ "homepage_link": "https://vulkan.org",
826
+ "github_topic_closest_fit": "vulkan",
827
+ "contributors_all": 102,
828
+ "contributors_2025": 21,
829
+ "contributors_2024": 15,
830
+ "contributors_2023": 15
831
+ },
832
+ {
833
+ "repo_name": "ccache",
834
+ "repo_link": "https://github.com/ccache/ccache",
835
+ "category": "compiler",
836
+ "github_about_section": "ccache - a fast compiler cache",
837
+ "homepage_link": "https://ccache.dev",
838
+ "contributors_all": 218,
839
+ "contributors_2025": 20,
840
+ "contributors_2024": 28,
841
+ "contributors_2023": 22
842
+ },
843
+ {
844
+ "repo_name": "lapack",
845
+ "repo_link": "https://github.com/Reference-LAPACK/lapack",
846
+ "category": "linear algebra",
847
+ "github_about_section": "LAPACK is a library of Fortran subroutines for solving the most commonly occurring problems in numerical linear algebra.",
848
+ "homepage_link": "https://netlib.org/lapack",
849
+ "github_topic_closest_fit": "linear-algebra",
850
+ "contributors_all": 178,
851
+ "contributors_2025": 20,
852
+ "contributors_2024": 24,
853
+ "contributors_2023": 42
854
+ },
855
+ {
856
+ "repo_name": "tflite-micro",
857
+ "repo_link": "https://github.com/tensorflow/tflite-micro",
858
+ "category": "ML for Microcontrollers",
859
+ "github_about_section": "Infrastructure to enable deployment of ML models to low-power resource-constrained embedded targets (including microcontrollers and digital signal processors).",
860
+ "contributors_all": 111,
861
+ "contributors_2025": 19,
862
+ "contributors_2024": 25,
863
+ "contributors_2023": 31
864
+ },
865
+ {
866
+ "repo_name": "quack",
867
+ "repo_link": "https://github.com/Dao-AILab/quack",
868
+ "category": "kernel examples",
869
+ "github_about_section": "A Quirky Assortment of CuTe Kernels",
870
+ "contributors_all": 17,
871
+ "contributors_2025": 17,
872
+ "contributors_2024": 0,
873
+ "contributors_2023": 0
874
+ },
875
+ {
876
+ "repo_name": "oneDPL",
877
+ "repo_link": "https://github.com/uxlfoundation/oneDPL",
878
+ "category": "parallel computing",
879
+ "github_about_section": "oneAPI DPC++ Library (oneDPL)",
880
+ "homepage_link": "https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/dpc-library.html",
881
+ "contributors_all": 67,
882
+ "contributors_2025": 17,
883
+ "contributors_2024": 29,
884
+ "contributors_2023": 28
885
+ },
886
+ {
887
+ "repo_name": "KernelBench",
888
+ "repo_link": "https://github.com/ScalingIntelligence/KernelBench",
889
+ "category": "benchmark",
890
+ "github_about_section": "KernelBench: Can LLMs Write GPU Kernels? - Benchmark with Torch -> CUDA problems",
891
+ "homepage_link": "https://scalingintelligence.stanford.edu/blogs/kernelbench",
892
+ "github_topic_closest_fit": "benchmark",
893
+ "contributors_all": 19,
894
+ "contributors_2025": 16,
895
+ "contributors_2024": 3,
896
+ "contributors_2023": 0
897
+ },
898
+ {
899
+ "repo_name": "reference-kernels",
900
+ "repo_link": "https://github.com/gpu-mode/reference-kernels",
901
+ "category": "kernel examples",
902
+ "github_about_section": "Official Problem Sets / Reference Kernels for the GPU MODE Leaderboard!",
903
+ "homepage_link": "https://gpumode.com",
904
+ "contributors_all": 16,
905
+ "contributors_2025": 16,
906
+ "contributors_2024": 0,
907
+ "contributors_2023": 0
908
+ },
909
+ {
910
+ "repo_name": "synthetic-data-kit",
911
+ "repo_link": "https://github.com/meta-llama/synthetic-data-kit",
912
+ "category": "synthetic data generation",
913
+ "github_about_section": "Tool for generating high quality Synthetic datasets",
914
+ "homepage_link": "https://pypi.org/project/synthetic-data-kit",
915
+ "github_topic_closest_fit": "synthetic-dataset-generation",
916
+ "contributors_all": 15,
917
+ "contributors_2025": 15,
918
+ "contributors_2024": 0,
919
+ "contributors_2023": 0
920
+ },
921
+ {
922
+ "repo_name": "tritonparse",
923
+ "repo_link": "https://github.com/meta-pytorch/tritonparse",
924
+ "category": "performance testing",
925
+ "github_about_section": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels",
926
+ "homepage_link": "https://meta-pytorch.org/tritonparse",
927
+ "contributors_all": 15,
928
+ "contributors_2025": 15,
929
+ "contributors_2024": 0,
930
+ "contributors_2023": 0
931
+ },
932
+ {
933
+ "repo_name": "kernels",
934
+ "repo_link": "https://github.com/huggingface/kernels",
935
+ "category": "gpu kernels",
936
+ "github_about_section": "Load compute kernels from the Hub",
937
+ "contributors_all": 15,
938
+ "contributors_2025": 14,
939
+ "contributors_2024": 2,
940
+ "contributors_2023": 0
941
+ },
942
+ {
943
+ "repo_name": "Wan2.2",
944
+ "repo_link": "https://github.com/Wan-Video/Wan2.2",
945
+ "category": "video generation",
946
+ "github_about_section": "Wan: Open and Advanced Large-Scale Video Generative Models",
947
+ "homepage_link": "https://wan.video",
948
+ "github_topic_closest_fit": "diffusion-models",
949
+ "contributors_all": 14,
950
+ "contributors_2025": 14,
951
+ "contributors_2024": 0,
952
+ "contributors_2023": 0
953
+ },
954
+ {
955
+ "repo_name": "SYCL-Docs",
956
+ "repo_link": "https://github.com/KhronosGroup/SYCL-Docs",
957
+ "category": "parallel computing",
958
+ "github_about_section": "SYCL Open Source Specification",
959
+ "homepage_link": "https://khronos.org/sycl",
960
+ "github_topic_closest_fit": "parallel-programming",
961
+ "contributors_all": 67,
962
+ "contributors_2025": 13,
963
+ "contributors_2024": 20,
964
+ "contributors_2023": 27
965
+ },
966
+ {
967
+ "repo_name": "Primus-Turbo",
968
+ "repo_link": "https://github.com/AMD-AGI/Primus-Turbo",
969
+ "category": "training framework",
970
+ "github_about_section": "Primus-Turbo is a high-performance acceleration library dedicated to large-scale model training on AMD GPUs. Built and optimized for the AMD ROCm platform, it covers the full training stack — including core compute operators (GEMM, Attention, GroupedGEMM), communication primitives, optimizer modules, low-precision computation (FP8), and compute–communication overlap kernels.",
971
+ "contributors_all": 12,
972
+ "contributors_2025": 12,
973
+ "contributors_2024": 0,
974
+ "contributors_2023": 0
975
+ },
976
+ {
977
+ "repo_name": "flashinfer-bench",
978
+ "repo_link": "https://github.com/flashinfer-ai/flashinfer-bench",
979
+ "category": "benchmark",
980
+ "github_about_section": "Building the Virtuous Cycle for AI-driven LLM Systems",
981
+ "homepage_link": "https://bench.flashinfer.ai",
982
+ "github_topic_closest_fit": "benchmark",
983
+ "contributors_all": 12,
984
+ "contributors_2025": 11,
985
+ "contributors_2024": 0,
986
+ "contributors_2023": 0
987
+ },
988
+ {
989
+ "repo_name": "FTorch",
990
+ "repo_link": "https://github.com/Cambridge-ICCS/FTorch",
991
+ "category": "middleware",
992
+ "github_about_section": "A library for directly calling PyTorch ML models from Fortran.",
993
+ "homepage_link": "https://cambridge-iccs.github.io/FTorch",
994
+ "github_topic_closest_fit": "machine-learning",
995
+ "contributors_all": 20,
996
+ "contributors_2025": 11,
997
+ "contributors_2024": 8,
998
+ "contributors_2023": 9
999
+ },
1000
+ {
1001
+ "repo_name": "TensorRT",
1002
+ "repo_link": "https://github.com/NVIDIA/TensorRT",
1003
+ "category": "inference engine",
1004
+ "github_about_section": "NVIDIA TensorRT is an SDK for high-performance deep learning inference on NVIDIA GPUs. This repository contains the open source components of TensorRT.",
1005
+ "homepage_link": "https://developer.nvidia.com/tensorrt",
1006
+ "contributors_all": 104,
1007
+ "contributors_2025": 10,
1008
+ "contributors_2024": 18,
1009
+ "contributors_2023": 19
1010
+ },
1011
+ {
1012
+ "repo_name": "TileIR",
1013
+ "repo_link": "https://github.com/microsoft/TileIR",
1014
+ "category": "parallel computing dsl",
1015
+ "github_about_section": "TileIR (tile-ir) is a concise domain-specific IR designed to streamline the development of high-performance GPU/CPU kernels (e.g., GEMM, Dequant GEMM, FlashAttention, LinearAttention). By employing a Pythonic syntax with an underlying compiler infrastructure on top of TVM, TileIR allows developers to focus on productivity without sacrificing the low-level optimizations necessary for state-of-the-art performance.",
1016
+ "github_topic_closest_fit": "parallel-programming",
1017
+ "contributors_all": 10,
1018
+ "contributors_2025": 10,
1019
+ "contributors_2024": 1,
1020
+ "contributors_2023": 0
1021
+ },
1022
+ {
1023
+ "repo_name": "kernels-community",
1024
+ "repo_link": "https://github.com/huggingface/kernels-community",
1025
+ "category": "gpu kernels",
1026
+ "homepage_link": "https://huggingface.co/kernels-community",
1027
+ "github_about_section": "Kernel sources for https://huggingface.co/kernels-community",
1028
+ "contributors_all": 9,
1029
+ "contributors_2025": 9,
1030
+ "contributors_2024": 0,
1031
+ "contributors_2023": 0
1032
+ },
1033
+ {
1034
+ "repo_name": "GEAK-agent",
1035
+ "repo_link": "https://github.com/AMD-AGI/GEAK-agent",
1036
+ "category": "agent",
1037
+ "github_about_section": "It is an LLM-based AI agent, which can write correct and efficient gpu kernels automatically.",
1038
+ "github_topic_closest_fit": "ai-agents",
1039
+ "contributors_all": 9,
1040
+ "contributors_2025": 9,
1041
+ "contributors_2024": 0,
1042
+ "contributors_2023": 0
1043
+ },
1044
+ {
1045
+ "repo_name": "OpenCL-SDK",
1046
+ "repo_link": "https://github.com/KhronosGroup/OpenCL-SDK",
1047
+ "category": "parallel computing",
1048
+ "github_about_section": "OpenCL SDK",
1049
+ "homepage_link": "https://khronos.org/opencl",
1050
+ "github_topic_closest_fit": "parallel-programming",
1051
+ "contributors_all": 25,
1052
+ "contributors_2025": 8,
1053
+ "contributors_2024": 6,
1054
+ "contributors_2023": 9
1055
+ },
1056
+ {
1057
+ "repo_name": "ZLUDA",
1058
+ "repo_link": "https://github.com/vosen/ZLUDA",
1059
+ "category": "middleware",
1060
+ "github_about_section": "CUDA on non-NVIDIA GPUs",
1061
+ "homepage_link": "https://vosen.github.io/ZLUDA",
1062
+ "github_topic_closest_fit": "parallel-programming",
1063
+ "contributors_all": 15,
1064
+ "contributors_2025": 8,
1065
+ "contributors_2024": 4,
1066
+ "contributors_2023": 0
1067
+ },
1068
+ {
1069
+ "repo_name": "intelliperf",
1070
+ "repo_link": "https://github.com/AMDResearch/intelliperf",
1071
+ "category": "performance testing",
1072
+ "github_about_section": "Automated bottleneck detection and solution orchestration",
1073
+ "homepage_link": "https://arxiv.org/html/2508.20258v1",
1074
+ "github_topic_closest_fit": "profiling",
1075
+ "contributors_all": 7,
1076
+ "contributors_2025": 7,
1077
+ "contributors_2024": 0,
1078
+ "contributors_2023": 0
1079
+ },
1080
+ {
1081
+ "repo_name": "nccl",
1082
+ "repo_link": "https://github.com/NVIDIA/nccl",
1083
+ "category": "distributed computing",
1084
+ "github_about_section": "Optimized primitives for collective multi-GPU communication",
1085
+ "homepage_link": "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html",
1086
+ "contributors_all": 51,
1087
+ "contributors_2025": 7,
1088
+ "contributors_2024": 5,
1089
+ "contributors_2023": 6
1090
+ },
1091
+ {
1092
+ "repo_name": "cudnn-frontend",
1093
+ "repo_link": "https://github.com/NVIDIA/cudnn-frontend",
1094
+ "category": "parallel computing",
1095
+ "github_about_section": "cudnn_frontend provides a c++ wrapper for the cudnn backend API and samples on how to use it",
1096
+ "homepage_link": "https://developer.nvidia.com/cudnn",
1097
+ "github_topic_closest_fit": "parallel-programming",
1098
+ "contributors_all": 12,
1099
+ "contributors_2025": 6,
1100
+ "contributors_2024": 5,
1101
+ "contributors_2023": 1
1102
+ },
1103
+ {
1104
+ "repo_name": "BitBLAS",
1105
+ "repo_link": "https://github.com/microsoft/BitBLAS",
1106
+ "category": "Basic Linear Algebra Subprograms (BLAS)",
1107
+ "github_about_section": "BitBLAS is a library to support mixed-precision matrix multiplications, especially for quantized LLM deployment.",
1108
+ "github_topic_closest_fit": "matrix-multiplication",
1109
+ "contributors_all": 17,
1110
+ "contributors_2025": 5,
1111
+ "contributors_2024": 14,
1112
+ "contributors_2023": 0
1113
+ },
1114
+ {
1115
+ "repo_name": "Self-Forcing",
1116
+ "repo_link": "https://github.com/guandeh17/Self-Forcing",
1117
+ "category": "video generation",
1118
+ "github_about_section": "Official codebase for \"Self Forcing: Bridging Training and Inference in Autoregressive Video Diffusion\" (NeurIPS 2025 Spotlight)",
1119
+ "homepage_link": "https://self-forcing.github.io",
1120
+ "github_topic_closest_fit": "diffusion-models",
1121
+ "contributors_all": 4,
1122
+ "contributors_2025": 4,
1123
+ "contributors_2024": 0,
1124
+ "contributors_2023": 0
1125
+ },
1126
+ {
1127
+ "repo_name": "TritonBench",
1128
+ "repo_link": "https://github.com/thunlp/TritonBench",
1129
+ "category": "benchmark",
1130
+ "github_about_section": "TritonBench: Benchmarking Large Language Model Capabilities for Generating Triton Operators",
1131
+ "homepage_link": "https://arxiv.org/abs/2502.14752",
1132
+ "github_topic_closest_fit": "benchmark",
1133
+ "contributors_all": 3,
1134
+ "contributors_2025": 3,
1135
+ "contributors_2024": 0,
1136
+ "contributors_2023": 0
1137
+ },
1138
+ {
1139
+ "repo_name": "hatchet",
1140
+ "repo_link": "https://github.com/LLNL/hatchet",
1141
+ "category": "performance testing",
1142
+ "github_about_section": "Graph-indexed Pandas DataFrames for analyzing hierarchical performance data",
1143
+ "homepage_link": "https://llnl-hatchet.readthedocs.io",
1144
+ "github_topic_closest_fit": "profiling",
1145
+ "contributors_all": 25,
1146
+ "contributors_2025": 3,
1147
+ "contributors_2024": 6,
1148
+ "contributors_2023": 8
1149
+ },
1150
+ {
1151
+ "repo_name": "streamv2v",
1152
+ "repo_link": "https://github.com/Jeff-LiangF/streamv2v",
1153
+ "category": "video generation",
1154
+ "github_about_section": "Official Pytorch implementation of StreamV2V.",
1155
+ "homepage_link": "https://jeff-liangf.github.io/projects/streamv2v",
1156
+ "github_topic_closest_fit": "diffusion-models",
1157
+ "contributors_all": 7,
1158
+ "contributors_2025": 3,
1159
+ "contributors_2024": 6,
1160
+ "contributors_2023": 0
1161
+ },
1162
+ {
1163
+ "repo_name": "mistral-inference",
1164
+ "repo_link": "https://github.com/mistralai/mistral-inference",
1165
+ "category": "inference engine",
1166
+ "github_about_section": "Official inference library for Mistral models",
1167
+ "homepage_link": "https://mistral.ai",
1168
+ "github_topic_closest_fit": "inference",
1169
+ "contributors_all": 29,
1170
+ "contributors_2025": 2,
1171
+ "contributors_2024": 17,
1172
+ "contributors_2023": 14
1173
+ },
1174
+ {
1175
+ "repo_name": "omnitrace",
1176
+ "repo_link": "https://github.com/ROCm/omnitrace",
1177
+ "category": "performance testing",
1178
+ "github_about_section": "Omnitrace: Application Profiling, Tracing, and Analysis",
1179
+ "homepage_link": "https://rocm.docs.amd.com/projects/omnitrace",
1180
+ "github_topic_closest_fit": "profiling",
1181
+ "contributors_all": 16,
1182
+ "contributors_2025": 2,
1183
+ "contributors_2024": 12,
1184
+ "contributors_2023": 2
1185
+ },
1186
+ {
1187
+ "repo_name": "cuJSON",
1188
+ "repo_link": "https://github.com/AutomataLab/cuJSON",
1189
+ "category": "library leveraging parallel compute",
1190
+ "github_about_section": "cuJSON: A Highly Parallel JSON Parser for GPUs",
1191
+ "homepage_link": "https://dl.acm.org/doi/10.1145/3760250.3762222",
1192
+ "github_topic_closest_fit": "json-parser",
1193
+ "contributors_all": 2,
1194
+ "contributors_2025": 2,
1195
+ "contributors_2024": 2,
1196
+ "contributors_2023": 0
1197
+ },
1198
+ {
1199
+ "repo_name": "IMO2025",
1200
+ "repo_link": "https://github.com/harmonic-ai/IMO2025",
1201
+ "category": "formal mathematical reasoning",
1202
+ "github_about_section": "Harmonic's model Aristotle achieved gold medal performance, solving 5 problems. This repository contains the lean statement files and proofs for Problems 1-5.",
1203
+ "homepage_link": "https://harmonic.fun",
1204
+ "github_topic_closest_fit": "lean",
1205
+ "contributors_all": 2,
1206
+ "contributors_2025": 2,
1207
+ "contributors_2024": 0,
1208
+ "contributors_2023": 0
1209
+ },
1210
+ {
1211
+ "repo_name": "RaBitQ",
1212
+ "repo_link": "https://github.com/gaoj0017/RaBitQ",
1213
+ "category": "quantization",
1214
+ "github_about_section": "[SIGMOD 2024] RaBitQ: Quantizing High-Dimensional Vectors with a Theoretical Error Bound for Approximate Nearest Neighbor Search",
1215
+ "homepage_link": "https://github.com/VectorDB-NTU/RaBitQ-Library",
1216
+ "github_topic_closest_fit": "nearest-neighbor-search",
1217
+ "contributors_all": 2,
1218
+ "contributors_2025": 2,
1219
+ "contributors_2024": 1,
1220
+ "contributors_2023": 0
1221
+ },
1222
+ {
1223
+ "repo_name": "torchdendrite",
1224
+ "repo_link": "https://github.com/sandialabs/torchdendrite",
1225
+ "category": "machine learning framework",
1226
+ "github_about_section": "Dendrites for PyTorch and SNNTorch neural networks",
1227
+ "contributors_all": 2,
1228
+ "contributors_2025": 1,
1229
+ "contributors_2024": 1,
1230
+ "contributors_2023": 0
1231
+ },
1232
+ {
1233
+ "repo_name": "triton-runner",
1234
+ "repo_link": "https://github.com/toyaix/triton-runner",
1235
+ "category": "debugger",
1236
+ "github_about_section": "Multi-Level Triton Runner supporting Python, IR, PTX, and cubin.",
1237
+ "homepage_link": "https://triton-runner.org",
1238
+ "contributors_all": 1,
1239
+ "contributors_2025": 1,
1240
+ "contributors_2024": 0,
1241
+ "contributors_2023": 0
1242
+ },
1243
+ {
1244
+ "repo_name": "nvcc4jupyter",
1245
+ "repo_link": "https://github.com/andreinechaev/nvcc4jupyter",
1246
+ "category": "middleware",
1247
+ "github_about_section": "A plugin for Jupyter Notebook to run CUDA C/C++ code",
1248
+ "homepage_link": "https://nvcc4jupyter.readthedocs.io",
1249
+ "contributors_all": 9,
1250
+ "contributors_2025": 0,
1251
+ "contributors_2024": 3,
1252
+ "contributors_2023": 3
1253
+ },
1254
+ {
1255
+ "repo_name": "CU2CL",
1256
+ "repo_link": "https://github.com/vtsynergy/CU2CL",
1257
+ "category": "source-to-source translator",
1258
+ "github_about_section": "A prototype CUDA-to-OpenCL source-to-source translator, built on the Clang compiler framework",
1259
+ "homepage_link": "http://chrec.cs.vt.edu/cu2cl",
1260
+ "github_topic_closest_fit": "parallel-programming",
1261
+ "contributors_all": 3,
1262
+ "contributors_2025": 0,
1263
+ "contributors_2024": 0,
1264
+ "contributors_2023": 0
1265
+ },
1266
+ {
1267
+ "repo_name": "triSYCL",
1268
+ "repo_link": "https://github.com/triSYCL/triSYCL",
1269
+ "category": "parallel computing",
1270
+ "github_about_section": "Generic system-wide modern C++ for heterogeneous platforms with SYCL from Khronos Group",
1271
+ "homepage_link": "https://trisycl.github.io/triSYCL/Doxygen/triSYCL/html/index.html",
1272
+ "github_topic_closest_fit": "parallel-programming",
1273
+ "contributors_all": 31,
1274
+ "contributors_2025": 0,
1275
+ "contributors_2024": 1,
1276
+ "contributors_2023": 3
1277
+ },
1278
+ {
1279
+ "repo_name": "StreamDiffusion",
1280
+ "repo_link": "https://github.com/cumulo-autumn/StreamDiffusion",
1281
+ "category": "image generation",
1282
+ "github_about_section": "StreamDiffusion: A Pipeline-Level Solution for Real-Time Interactive Generation",
1283
+ "homepage_link": "https://arxiv.org/abs/2312.12491",
1284
+ "github_topic_closest_fit": "diffusion-models",
1285
+ "contributors_all": 29,
1286
+ "contributors_2025": 0,
1287
+ "contributors_2024": 9,
1288
+ "contributors_2023": 25
1289
+ },
1290
+ {
1291
+ "repo_name": "wandb",
1292
+ "repo_link": "https://github.com/wandb/wandb",
1293
+ "github_about_section": "The AI developer platform. Use Weights & Biases to train and fine-tune models, and manage models from experimentation to production.",
1294
+ "homepage_link": "https://wandb.ai"
1295
+ },
1296
+ {
1297
+ "repo_name": "aws-neuron-sdk",
1298
+ "repo_link": "https://github.com/aws-neuron/aws-neuron-sdk",
1299
+ "github_about_section": "Powering AWS purpose-built machine learning chips. Blazing fast and cost effective, natively integrated into PyTorch and TensorFlow and integrated with your favorite AWS services",
1300
+ "homepage_link": "https://aws.amazon.com/ai/machine-learning/neuron"
1301
+ },
1302
+ {
1303
+ "repo_name": "onnxruntime",
1304
+ "repo_link": "https://github.com/microsoft/onnxruntime",
1305
+ "github_about_section": "ONNX Runtime: cross-platform, high performance ML inferencing and training accelerator",
1306
+ "homepage_link": "https://onnxruntime.ai"
1307
+ },
1308
+ {
1309
+ "repo_name": "ort",
1310
+ "repo_link": "https://github.com/pykeio/ort",
1311
+ "github_about_section": "Fast ML inference & training for ONNX models in Rust",
1312
+ "homepage_link": "https://ort.pyke.io"
1313
+ },
1314
+ {
1315
+ "repo_name": "Triton-distributed",
1316
+ "repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
1317
+ "github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
1318
+ "homepage_link": "https://triton-distributed.readthedocs.io"
1319
+ },
1320
+ {
1321
+ "repo_name": "gemlite",
1322
+ "repo_link": "https://github.com/mobiusml/gemlite",
1323
+ "github_about_section": "Fast low-bit matmul kernels in Triton"
1324
+ },
1325
+ {
1326
+ "repo_name": "cutile-python",
1327
+ "repo_link": "https://github.com/NVIDIA/cutile-python",
1328
+ "github_about_section": "cuTile is a programming model for writing parallel kernels for NVIDIA GPUs",
1329
+ "homepage_link": "https://docs.nvidia.com/cuda/cutile-python"
1330
+ },
1331
+ {
1332
+ "repo_name": "tilus",
1333
+ "repo_link": "https://github.com/NVIDIA/tilus",
1334
+ "github_about_section": "Tilus is a tile-level kernel programming language with explicit control over shared memory and registers.",
1335
+ "homepage_link": "https://nvidia.github.io/tilus"
1336
+ },
1337
+ {
1338
+ "repo_name": "triton-windows",
1339
+ "repo_link": "https://github.com/woct0rdho/triton-windows",
1340
+ "github_about_section": "Fork of the Triton language and compiler for Windows support and easy installation"
1341
+ },
1342
+ {
1343
+ "repo_name": "triton-windows",
1344
+ "repo_link": "https://github.com/triton-lang/triton-windows",
1345
+ "github_about_section": "Triton with Windows support",
1346
+ "homepage_link": "https://triton-lang.org"
1347
+ },
1348
+ {
1349
+ "repo_name": "flash-linear-attention",
1350
+ "repo_link": "https://github.com/fla-org/flash-linear-attention",
1351
+ "github_about_section": "Efficient implementations of state-of-the-art linear attention models"
1352
+ }
1353
+ ]