TylerHilbert committed on
Commit
9f15961
·
1 Parent(s): 5b79304

Finished categories for later-added repos.

Browse files
PyTorchConference2025_GithubRepos.json CHANGED
@@ -1072,64 +1072,68 @@
1072
  {
1073
  "repo_name": "wandb",
1074
  "repo_link": "https://github.com/wandb/wandb",
 
1075
  "github_about_section": "The AI developer platform. Use Weights & Biases to train and fine-tune models, and manage models from experimentation to production.",
1076
  "homepage_link": "https://wandb.ai"
1077
  },
1078
  {
1079
  "repo_name": "aws-neuron-sdk",
1080
  "repo_link": "https://github.com/aws-neuron/aws-neuron-sdk",
 
1081
  "github_about_section": "Powering AWS purpose-built machine learning chips. Blazing fast and cost effective, natively integrated into PyTorch and TensorFlow and integrated with your favorite AWS services",
1082
  "homepage_link": "https://aws.amazon.com/ai/machine-learning/neuron"
1083
  },
1084
  {
1085
  "repo_name": "onnxruntime",
1086
  "repo_link": "https://github.com/microsoft/onnxruntime",
 
1087
  "github_about_section": "ONNX Runtime: cross-platform, high performance ML inferencing and training accelerator",
1088
  "homepage_link": "https://onnxruntime.ai"
1089
  },
1090
  {
1091
  "repo_name": "ort",
1092
  "repo_link": "https://github.com/pykeio/ort",
 
1093
  "github_about_section": "Fast ML inference & training for ONNX models in Rust",
1094
  "homepage_link": "https://ort.pyke.io"
1095
  },
1096
  {
1097
  "repo_name": "Triton-distributed",
1098
  "repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
 
1099
  "github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
1100
  "homepage_link": "https://triton-distributed.readthedocs.io"
1101
  },
1102
  {
1103
  "repo_name": "gemlite",
1104
  "repo_link": "https://github.com/dropbox/gemlite",
 
1105
  "github_about_section": "Fast low-bit matmul kernels in Triton"
1106
  },
1107
  {
1108
  "repo_name": "cutile-python",
1109
  "repo_link": "https://github.com/NVIDIA/cutile-python",
 
1110
  "github_about_section": "cuTile is a programming model for writing parallel kernels for NVIDIA GPUs",
1111
  "homepage_link": "https://docs.nvidia.com/cuda/cutile-python"
1112
  },
1113
  {
1114
  "repo_name": "tilus",
1115
  "repo_link": "https://github.com/NVIDIA/tilus",
 
1116
  "github_about_section": "Tilus is a tile-level kernel programming language with explicit control over shared memory and registers.",
1117
  "homepage_link": "https://nvidia.github.io/tilus"
1118
  },
1119
  {
1120
  "repo_name": "triton-windows",
1121
  "repo_link": "https://github.com/woct0rdho/triton-windows",
 
1122
  "github_about_section": "Fork of the Triton language and compiler for Windows support and easy installation"
1123
  },
1124
- {
1125
- "repo_name": "triton-windows",
1126
- "repo_link": "https://github.com/triton-lang/triton-windows",
1127
- "github_about_section": "Triton with Windows support",
1128
- "homepage_link": "https://triton-lang.org"
1129
- },
1130
  {
1131
  "repo_name": "flash-linear-attention",
1132
  "repo_link": "https://github.com/fla-org/flash-linear-attention",
 
1133
  "github_about_section": "Efficient implementations of state-of-the-art linear attention models"
1134
  }
1135
  ]
 
1072
  {
1073
  "repo_name": "wandb",
1074
  "repo_link": "https://github.com/wandb/wandb",
1075
+ "category": "ml visualization",
1076
  "github_about_section": "The AI developer platform. Use Weights & Biases to train and fine-tune models, and manage models from experimentation to production.",
1077
  "homepage_link": "https://wandb.ai"
1078
  },
1079
  {
1080
  "repo_name": "aws-neuron-sdk",
1081
  "repo_link": "https://github.com/aws-neuron/aws-neuron-sdk",
1082
+ "category": "sdk",
1083
  "github_about_section": "Powering AWS purpose-built machine learning chips. Blazing fast and cost effective, natively integrated into PyTorch and TensorFlow and integrated with your favorite AWS services",
1084
  "homepage_link": "https://aws.amazon.com/ai/machine-learning/neuron"
1085
  },
1086
  {
1087
  "repo_name": "onnxruntime",
1088
  "repo_link": "https://github.com/microsoft/onnxruntime",
1089
+ "category": "machine learning interoperability",
1090
  "github_about_section": "ONNX Runtime: cross-platform, high performance ML inferencing and training accelerator",
1091
  "homepage_link": "https://onnxruntime.ai"
1092
  },
1093
  {
1094
  "repo_name": "ort",
1095
  "repo_link": "https://github.com/pykeio/ort",
1096
+ "category": "machine learning interoperability",
1097
  "github_about_section": "Fast ML inference & training for ONNX models in Rust",
1098
  "homepage_link": "https://ort.pyke.io"
1099
  },
1100
  {
1101
  "repo_name": "Triton-distributed",
1102
  "repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
1103
+ "category": "distributed computing",
1104
  "github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
1105
  "homepage_link": "https://triton-distributed.readthedocs.io"
1106
  },
1107
  {
1108
  "repo_name": "gemlite",
1109
  "repo_link": "https://github.com/dropbox/gemlite",
1110
+ "category": "gpu kernels",
1111
  "github_about_section": "Fast low-bit matmul kernels in Triton"
1112
  },
1113
  {
1114
  "repo_name": "cutile-python",
1115
  "repo_link": "https://github.com/NVIDIA/cutile-python",
1116
+ "category": "parallel computing",
1117
  "github_about_section": "cuTile is a programming model for writing parallel kernels for NVIDIA GPUs",
1118
  "homepage_link": "https://docs.nvidia.com/cuda/cutile-python"
1119
  },
1120
  {
1121
  "repo_name": "tilus",
1122
  "repo_link": "https://github.com/NVIDIA/tilus",
1123
+ "category": "parallel computing",
1124
  "github_about_section": "Tilus is a tile-level kernel programming language with explicit control over shared memory and registers.",
1125
  "homepage_link": "https://nvidia.github.io/tilus"
1126
  },
1127
  {
1128
  "repo_name": "triton-windows",
1129
  "repo_link": "https://github.com/woct0rdho/triton-windows",
1130
+ "category": "parallel computing dsl",
1131
  "github_about_section": "Fork of the Triton language and compiler for Windows support and easy installation"
1132
  },
 
 
 
 
 
 
1133
  {
1134
  "repo_name": "flash-linear-attention",
1135
  "repo_link": "https://github.com/fla-org/flash-linear-attention",
1136
+ "category": "gpu kernels",
1137
  "github_about_section": "Efficient implementations of state-of-the-art linear attention models"
1138
  }
1139
  ]