{
  "repo_type": "model",
  "project_name": "Decode AI",
  "recommended_upload_files": [
    "README.md",
    "main.py",
    "decode_moe.py",
    "requirements.txt",
    "checkpoints/decode_latest.safetensors"
  ],
  "optional_upload_files": [
    "checkpoints/decode_latest.pt",
    "dataset/prompts.csv"
  ],
  "local_model_info": {
    "architecture": "Custom decoder-style multimodal MoE",
    "tokenizer": "ai-tokenizer:openai/gpt-5",
    "checkpoint_format": "PyTorch checkpoint (.pt) and safetensors export",
    "custom_code_required": true
  },
  "checkpoint_metadata_written_by_export": [
    "source_checkpoint",
    "total_params",
    "global_step",
    "epoch",
    "tokenizer_name",
    "model_config_json"
  ],
  "upload_steps": [
    "Install dependencies from requirements.txt",
    "Run main.py in export mode to produce decode_latest.safetensors",
    "Upload the files listed in recommended_upload_files to the Hugging Face model repository"
  ],
  "notes": [
    "This is not a standard Transformers AutoModel checkpoint.",
    "The README should remain at the repository root for Hugging Face model card rendering.",
    "Add a license before publishing if the repo is public."
  ]
}