vLLM compatible random Medusa heads for llama-68m (only to test code)
Browse files- .gitattributes +1 -0
- config.json +1 -0
- model.safetensors +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
model.safetensors filter=lfs diff=lfs merge=lfs -text
|
config.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"_name_or_path": "abhigoyal/vllm-medusa-llama-68m-random", "architectures": ["MedusaModel"], "hidden_size": 768, "model_type": "medusa", "num_heads": 5, "num_hidden_layers": 1, "transformers_version": "4.41.2", "truncated_vocab_size": null, "vocab_size": 32000}
|
model.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:77e25e430a4740d2a667770335d2703d2ea6c6adf91268193248b6e0df21a73c
|
3 |
+
size 503317424
|