TylerHilbert committed on
Commit 00feb54 · 1 Parent(s): edac097

Cleaned homepage_link formatting. Updated DeepSpeed. Added missing Vulkan repos

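Each entry in this file follows the small schema visible in the hunks below (repo_name, repo_link, and optional category, github_about_section, homepage_link, github_topic_closest_fit). As an illustrative aside, here is a minimal Python sketch for loading the updated file and grouping repos by category; it assumes the top level of the JSON is an array of these objects, and the grouping logic and the "uncategorized" fallback label are assumptions for the example, not part of the dataset:

    import json
    from collections import defaultdict

    # Load the repo list committed in this file (path assumed relative to the dataset root,
    # and the top level assumed to be a JSON array of repo objects).
    with open("PyTorchConference2025_GithubRepos.json", encoding="utf-8") as f:
        repos = json.load(f)

    # Group entries by their optional "category" field; entries without one
    # fall back to a hypothetical "uncategorized" bucket.
    by_category = defaultdict(list)
    for entry in repos:
        by_category[entry.get("category", "uncategorized")].append(entry["repo_name"])

    for category, names in sorted(by_category.items()):
        print(f"{category}: {', '.join(sorted(names))}")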
PyTorchConference2025_GithubRepos.json CHANGED
@@ -4,7 +4,7 @@
  "repo_link": "https://github.com/block/goose",
  "category": "agent",
  "github_about_section": "an open source, extensible AI agent that goes beyond code suggestions - install, execute, edit, and test with any LLM",
- "homepage_link": "https://block.github.io/goose/",
+ "homepage_link": "https://block.github.io/goose",
  "github_topic_closest_fit": "mcp"
  },
  {
@@ -28,7 +28,7 @@
  "repo_link": "https://github.com/ScalingIntelligence/KernelBench",
  "category": "benchmark",
  "github_about_section": "KernelBench: Can LLMs Write GPU Kernels? - Benchmark with Torch -> CUDA problems",
- "homepage_link": "https://scalingintelligence.stanford.edu/blogs/kernelbench/",
+ "homepage_link": "https://scalingintelligence.stanford.edu/blogs/kernelbench",
  "github_topic_closest_fit": "benchmark"
  },
  {
@@ -36,7 +36,7 @@
  "repo_link": "https://github.com/SWE-bench/SWE-bench",
  "category": "benchmark",
  "github_about_section": "SWE-bench: Can Language Models Resolve Real-world Github Issues?",
- "homepage_link": "https://www.swebench.com",
+ "homepage_link": "https://swebench.com",
  "github_topic_closest_fit": "benchmark"
  },
  {
@@ -44,7 +44,7 @@
  "repo_link": "https://github.com/laude-institute/terminal-bench",
  "category": "benchmark",
  "github_about_section": "A benchmark for LLMs on complicated tasks in the terminal",
- "homepage_link": "https://www.tbench.ai",
+ "homepage_link": "https://tbench.ai",
  "github_topic_closest_fit": "benchmark"
  },
  {
@@ -52,6 +52,7 @@
  "repo_link": "https://github.com/thunlp/TritonBench",
  "category": "benchmark",
  "github_about_section": "TritonBench: Benchmarking Large Language Model Capabilities for Generating Triton Operators",
+ "homepage_link": "https://arxiv.org/abs/2502.14752",
  "github_topic_closest_fit": "benchmark"
  },
  {
@@ -73,7 +74,7 @@
  "repo_link": "https://github.com/AMD-AGI/hipBLASLt",
  "category": "Basic Linear Algebra Subprograms (BLAS)",
  "github_about_section": "hipBLASLt is a library that provides general matrix-matrix operations with a flexible API and extends functionalities beyond a traditional BLAS library",
- "homepage_link": "https://rocm.docs.amd.com/projects/hipBLASLt/en/latest/index.html",
+ "homepage_link": "https://rocm.docs.amd.com/projects/hipBLASLt",
  "github_topic_closest_fit": "matrix-multiplication"
  },
  {
@@ -119,7 +120,7 @@
  "repo_link": "https://github.com/NVIDIA/cuda-python",
  "category": "CUDA / OpenCL",
  "github_about_section": "CUDA Python: Performance meets Productivity",
- "homepage_link": "https://nvidia.github.io/cuda-python/"
+ "homepage_link": "https://nvidia.github.io/cuda-python"
  },
  {
  "repo_name": "OpenCL-SDK",
@@ -155,7 +156,7 @@
  "repo_link": "https://github.com/vosen/ZLUDA",
  "category": "CUDA / OpenCL",
  "github_about_section": "CUDA on non-NVIDIA GPUs",
- "homepage_link": "https://vosen.github.io/ZLUDA/",
+ "homepage_link": "https://vosen.github.io/ZLUDA",
  "github_topic_closest_fit": "cuda"
  },
  {
@@ -171,7 +172,7 @@
  "repo_link": "https://github.com/mistralai/mistral-inference",
  "category": "inference engine",
  "github_about_section": "Official inference library for Mistral models",
- "homepage_link": "https://mistral.ai/",
+ "homepage_link": "https://mistral.ai",
  "github_topic_closest_fit": "llm-inference"
  },
  {
@@ -322,7 +323,7 @@
  "repo_link": "https://github.com/triton-lang/triton",
  "category": "parallel computing dsl",
  "github_about_section": "Development repository for the Triton language and compiler",
- "homepage_link": "https://triton-lang.org/",
+ "homepage_link": "https://triton-lang.org",
  "github_topic_closest_fit": "parallel-programming"
  },
  {
@@ -353,7 +354,7 @@
  "repo_link": "https://github.com/ROCm/omnitrace",
  "category": "performance testing",
  "github_about_section": "Omnitrace: Application Profiling, Tracing, and Analysis",
- "homepage_link": "https://rocm.docs.amd.com/projects/omnitrace/en/docs-6.2.4",
+ "homepage_link": "https://rocm.docs.amd.com/projects/omnitrace",
  "github_topic_closest_fit": "profiling"
  },
  {
@@ -385,7 +386,7 @@
  "repo_link": "https://github.com/elastic/elasticsearch",
  "category": "search engine",
  "github_about_section": "Free and Open Source, Distributed, RESTful Search Engine",
- "homepage_link": "https://www.elastic.co/products/elasticsearch",
+ "homepage_link": "https://elastic.co/products/elasticsearch",
  "github_topic_closest_fit": "search-engine"
  },
  {
@@ -393,7 +394,7 @@
  "repo_link": "https://github.com/jupyterlab/jupyterlab",
  "category": "user interface",
  "github_about_section": "JupyterLab computational environment.",
- "homepage_link": "https://jupyterlab.readthedocs.io/",
+ "homepage_link": "https://jupyterlab.readthedocs.io",
  "github_topic_closest_fit": "jupyter"
  },
  {
@@ -424,7 +425,7 @@
  "repo_name": "ao",
  "repo_link": "https://github.com/pytorch/ao",
  "github_about_section": "PyTorch native quantization and sparsity for training and inference",
- "homepage_link": "https://pytorch.org/ao/stable/index.html",
+ "homepage_link": "https://pytorch.org/ao",
  "github_topic_closest_fit": "quantization"
  },
  {
@@ -444,14 +445,14 @@
  "repo_name": "ComfyUI",
  "repo_link": "https://github.com/comfyanonymous/ComfyUI",
  "github_about_section": "The most powerful and modular diffusion model GUI, api and backend with a graph/nodes interface.",
- "homepage_link": "https://www.comfy.org/",
+ "homepage_link": "https://comfy.org",
  "github_topic_closest_fit": "stable-diffusion"
  },
  {
  "repo_name": "composable_kernel",
  "repo_link": "https://github.com/ROCm/composable_kernel",
  "github_about_section": "Composable Kernel: Performance Portable Programming Model for Machine Learning Tensor Operators",
- "homepage_link": "https://rocm.docs.amd.com/projects/composable_kernel/en/latest/"
+ "homepage_link": "https://rocm.docs.amd.com/projects/composable_kernel"
  },
  {
  "repo_name": "cudnn-frontend",
@@ -466,9 +467,10 @@
  {
  "repo_name": "DeepSpeed",
  "repo_link": "https://github.com/deepspeedai/DeepSpeed",
+ "category": "training framework",
  "github_about_section": "DeepSpeed is a deep learning optimization library that makes distributed training and inference easy, efficient, and effective.",
- "homepage_link": "https://www.deepspeed.ai/",
- "github_topic_closest_fit": "gpu"
+ "homepage_link": "https://deepspeed.ai",
+ "github_topic_closest_fit": "gpu-acceleration"
  },
  {
  "repo_name": "dstack",
@@ -488,7 +490,7 @@
  "repo_name": "FTorch",
  "repo_link": "https://github.com/Cambridge-ICCS/FTorch",
  "github_about_section": "A library for directly calling PyTorch ML models from Fortran.",
- "homepage_link": "https://cambridge-iccs.github.io/FTorch/",
+ "homepage_link": "https://cambridge-iccs.github.io/FTorch",
  "github_topic_closest_fit": "machine-learning"
  },
  {
@@ -507,7 +509,7 @@
  "repo_name": "hip",
  "repo_link": "https://github.com/ROCm/hip",
  "github_about_section": "HIP: C++ Heterogeneous-Compute Interface for Portability",
- "homepage_link": "https://rocmdocs.amd.com/projects/HIP/",
+ "homepage_link": "https://rocmdocs.amd.com/projects/HIP",
  "github_topic_closest_fit": "hip"
  },
  {
@@ -544,7 +546,7 @@
  "repo_name": "letta",
  "repo_link": "https://github.com/letta-ai/letta",
  "github_about_section": "Letta is the platform for building stateful agents: open AI with advanced memory that can learn and self-improve over time.",
- "homepage_link": "https://docs.letta.com/",
+ "homepage_link": "https://docs.letta.com",
  "github_topic_closest_fit": "ai-agents"
  },
  {
@@ -556,7 +558,7 @@
  "repo_name": "LMCache",
  "repo_link": "https://github.com/LMCache/LMCache",
  "github_about_section": "Supercharge Your LLM with the Fastest KV Cache Layer",
- "homepage_link": "https://lmcache.ai/",
+ "homepage_link": "https://lmcache.ai",
  "github_topic_closest_fit": "inference"
  },
  {
@@ -593,7 +595,7 @@
  "repo_name": "modular",
  "repo_link": "https://github.com/modular/modular",
  "github_about_section": "The Modular Platform (includes MAX & Mojo)",
- "homepage_link": "https://docs.modular.com/",
+ "homepage_link": "https://docs.modular.com",
  "github_topic_closest_fit": "mojo"
  },
  {
@@ -606,7 +608,7 @@
  "repo_name": "Mooncake",
  "repo_link": "https://github.com/kvcache-ai/Mooncake",
  "github_about_section": "Mooncake is the serving platform for Kimi, a leading LLM service provided by Moonshot AI.",
- "homepage_link": "https://kvcache-ai.github.io/Mooncake/",
+ "homepage_link": "https://kvcache-ai.github.io/Mooncake",
  "github_topic_closest_fit": "inference"
  },
  {
@@ -628,14 +630,14 @@
  "repo_name": "ome",
  "repo_link": "https://github.com/sgl-project/ome",
  "github_about_section": "OME is a Kubernetes operator for enterprise-grade management and serving of Large Language Models (LLMs)",
- "homepage_link": "http://docs.sglang.ai/ome/",
+ "homepage_link": "http://docs.sglang.ai/ome",
  "github_topic_closest_fit": "k8s"
  },
  {
  "repo_name": "ondemand",
  "repo_link": "https://github.com/OSC/ondemand",
  "github_about_section": "Supercomputing. Seamlessly. Open, Interactive HPC Via the Web",
- "homepage_link": "https://openondemand.org/",
+ "homepage_link": "https://openondemand.org",
  "github_topic_closest_fit": "hpc"
  },
  {
@@ -670,7 +672,7 @@
  "repo_name": "pybind11",
  "repo_link": "https://github.com/pybind/pybind11",
  "github_about_section": "Seamless operability between C++11 and Python",
- "homepage_link": "https://pybind11.readthedocs.io/",
+ "homepage_link": "https://pybind11.readthedocs.io",
  "github_topic_closest_fit": "bindings"
  },
  {
@@ -754,7 +756,7 @@
  "repo_name": "spark",
  "repo_link": "https://github.com/apache/spark",
  "github_about_section": "Apache Spark - A unified analytics engine for large-scale data processing",
- "homepage_link": "https://spark.apache.org/",
+ "homepage_link": "https://spark.apache.org",
  "github_topic_closest_fit": "big-data"
  },
  {
@@ -766,13 +768,13 @@
  "repo_name": "streamv2v",
  "repo_link": "https://github.com/Jeff-LiangF/streamv2v",
  "github_about_section": "Official Pytorch implementation of StreamV2V.",
- "homepage_link": "https://jeff-liangf.github.io/projects/streamv2v/"
+ "homepage_link": "https://jeff-liangf.github.io/projects/streamv2v"
  },
  {
  "repo_name": "synthetic-data-kit",
  "repo_link": "https://github.com/meta-llama/synthetic-data-kit",
  "github_about_section": "Tool for generating high quality Synthetic datasets",
- "homepage_link": "https://pypi.org/project/synthetic-data-kit/",
+ "homepage_link": "https://pypi.org/project/synthetic-data-kit",
  "github_topic_closest_fit": "generation"
  },
  {
@@ -812,7 +814,7 @@
  "repo_name": "Triton-distributed",
  "repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
  "github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
- "homepage_link": "https://triton-distributed.readthedocs.io/en/latest"
+ "homepage_link": "https://triton-distributed.readthedocs.io"
  },
  {
  "repo_name": "triton-runner",
@@ -825,7 +827,7 @@
  "repo_name": "tritonparse",
  "repo_link": "https://github.com/meta-pytorch/tritonparse",
  "github_about_section": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels",
- "homepage_link": "https://meta-pytorch.org/tritonparse/",
+ "homepage_link": "https://meta-pytorch.org/tritonparse",
  "github_topic_closest_fit": "triton"
  },
  {
@@ -851,12 +853,31 @@
  "repo_name": "verl",
  "repo_link": "https://github.com/volcengine/verl",
  "github_about_section": "verl: Volcano Engine Reinforcement Learning for LLMs",
- "homepage_link": "https://verl.readthedocs.io/en/latest/index.html"
+ "homepage_link": "https://verl.readthedocs.io"
+ },
+ {
+ "repo_name": "Vulkan-Hpp",
+ "repo_link": "https://github.com/KhronosGroup/Vulkan-Hpp",
+ "category": "graphics api",
+ "github_about_section": "Open-Source Vulkan C++ API",
+ "homepage_link": "https://vulkan.org",
+ "github_topic_closest_fit": "vulkan"
+ },
+ {
+ "repo_name": "Vulkan-Tools",
+ "repo_link": "https://github.com/KhronosGroup/Vulkan-Tools",
+ "category": "graphics api",
+ "github_about_section": "Vulkan Development Tools",
+ "homepage_link": "https://vulkan.org",
+ "github_topic_closest_fit": "vulkan"
  },
  {
  "repo_name": "Vulkan-Docs",
  "repo_link": "https://github.com/KhronosGroup/Vulkan-Docs",
- "github_about_section": "The Vulkan API Specification and related tools"
+ "category": "graphics api",
+ "github_about_section": "The Vulkan API Specification and related tools",
+ "homepage_link": "https://vulkan.org",
+ "github_topic_closest_fit": "vulkan"
  },
  {
  "repo_name": "Wan2.2",