"""Module containing performance results for the Arcee-SuperNova-Medius model."""
results_arcee_supernova_medius = {
    "name": "Arcee-SuperNova-Medius",
    "modelType": "Qwen2.5 14B",
    "configurations": [
        {
            "instanceType": "r8g.4xlarge",
            "quantization": "Q8_0",
            "container": "llama.cpp 11/27/24",
            "status": "OK",
            "tokensPerSecond": "13.5",
            "notes": "-fa",
        },
        {
            "instanceType": "r8g.4xlarge",
            "quantization": "Q4_0_4_8",
            "container": "llama.cpp 11/27/24",
            "status": "OK",
            "tokensPerSecond": "22",
            "notes": "-fa",
        },
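        # A minimal sketch (an assumption, not the exact benchmark command) of how the
        # llama.cpp runs above could be launched on the Graviton4-based r8g instance.
        # The GGUF file name, prompt, and token count are placeholders; only the
        # -fa (flash attention) flag comes from the notes field:
        #   ./llama-cli -m supernova-medius-q4_0_4_8.gguf -fa -p "Hello" -n 256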
        {
            "instanceType": "g5.12xlarge",
            "quantization": "None",
            "container": "LMI 0.30+vLLM 0.6.2",
            "status": "OK",
            "tokensPerSecond": "45",
            "notes": "",
        },
        {
            "instanceType": "g6.12xlarge",
            "quantization": "None",
            "container": "LMI 0.30+vLLM 0.6.2",
            "status": "OK",
            "tokensPerSecond": "29",
            "notes": "",
        },
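        # A hedged sketch of a serving.properties file for the LMI 0.30 + vLLM 0.6.2
        # rows above; the model id, tensor parallel degree, and max model length are
        # assumptions, not values recorded in these benchmarks:
        #   engine=Python
        #   option.model_id=arcee-ai/SuperNova-Medius
        #   option.rolling_batch=vllm
        #   option.tensor_parallel_degree=4
        #   option.max_model_len=16384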
        {
            "instanceType": "g6e.12xlarge (4 GPUs)",
            "quantization": "None",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "70",
            "notes": "--tensor-parallel-size 4 --max-model-len 16384",
        },
        {
            "instanceType": "g6e.12xlarge (2 GPUs)",
            "quantization": "None",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "43",
            "notes": "--tensor-parallel-size 2 --max-model-len 16384",
        },
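        # A hedged sketch of the vLLM 0.6.4.post1 launch behind the g6e.12xlarge rows
        # above; the model id is an assumption, the flags come from the notes fields:
        #   vllm serve arcee-ai/SuperNova-Medius \
        #       --tensor-parallel-size 4 \
        #       --max-model-len 16384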
        {
            "instanceType": "p4d.24xlarge",
            "quantization": "None",
            "container": "LMI 0.30+vLLM 0.6.2",
            "status": "OK",
            "tokensPerSecond": "108",
            "notes": "",
        },
        {
            "instanceType": "p5.48xlarge (8 GPUs)",
            "quantization": "None",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "162",
            "notes": "--tensor-parallel-size 8",
        },
        {
            "instanceType": "p5.48xlarge (4 GPUs)",
            "quantization": "None",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "138",
            "notes": "--tensor-parallel-size 4",
        },
        {
            "instanceType": "p5.48xlarge (2 GPUs)",
            "quantization": "None",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "102",
            "notes": "--tensor-parallel-size 2",
        },
        {
            "instanceType": "p5.48xlarge (1 GPU)",
            "quantization": "None",
            "container": "vLLM 0.6.4.post1",
            "status": "OK",
            "tokensPerSecond": "73",
            "notes": "--tensor-parallel-size 1",
        },
    ],
}
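

# A minimal usage sketch (an addition for illustration, not part of the original data):
# find the fastest recorded configuration. Assumes every tokensPerSecond value parses
# as a float, which holds for the entries above.
if __name__ == "__main__":
    fastest = max(
        results_arcee_supernova_medius["configurations"],
        key=lambda cfg: float(cfg["tokensPerSecond"]),
    )
    print(
        f"Fastest: {fastest['instanceType']} with {fastest['container']} "
        f"at {fastest['tokensPerSecond']} tokens/s"
    )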