{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.0+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "67d850c9fa4c56f9fdc4ebb17d7e32fb16d42bf9",
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.33.0",
"accelerate_commit": null,
"diffusers_version": "0.30.1",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.9",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1123.135488,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 4.775064514999997,
"mean": 4.775064514999997,
"stdev": 0.0,
"p50": 4.775064514999997,
"p90": 4.775064514999997,
"p95": 4.775064514999997,
"p99": 4.775064514999997,
"values": [
4.775064514999997
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 6.826535037777718e-05,
"ram": 2.853313152413098e-06,
"gpu": 0,
"total": 7.111866353019027e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 976.478208,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.6459372350000194,
"mean": 0.04306248233333463,
"stdev": 0.002738235227586167,
"p50": 0.04512732400002051,
"p90": 0.04574027739998883,
"p95": 0.045874516600002836,
"p99": 0.04601342572001272,
"values": [
0.04604815300001519,
0.045543101000021124,
0.04552687000000333,
0.04565054199997576,
0.04558907700004511,
0.04565003999999817,
0.04512732400002051,
0.04580010099999754,
0.040337824000005185,
0.040158809999979894,
0.040242836000004445,
0.03986348899996983,
0.04007578499999909,
0.04011917700000822,
0.040204105999976036
]
},
"throughput": {
"unit": "tokens/s",
"value": 46.444140969825185
},
"energy": {
"unit": "kWh",
"cpu": 1.5941825246666846e-06,
"ram": 6.66237616532985e-08,
"gpu": 0.0,
"total": 1.660806286319983e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1204234.362835658
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 977.26464,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.4024064799999678,
"mean": 0.02682709866666452,
"stdev": 0.0017906043595530949,
"p50": 0.027881951000040317,
"p90": 0.028613849399982884,
"p95": 0.028776467099987713,
"p99": 0.02900067261998629,
"values": [
0.029056723999985934,
0.02839754299998276,
0.028656356999988475,
0.028332862000013392,
0.02847021899998481,
0.028538926999999603,
0.028550087999974494,
0.027881951000040317,
0.024991914999986875,
0.024593181000000186,
0.025119276999987505,
0.024830506000000696,
0.02500173500004621,
0.025052819999984877,
0.02493237499999168
]
},
"throughput": {
"unit": "tokens/s",
"value": 37.2757416828904
},
"energy": {
"unit": "kWh",
"cpu": 9.460267256805198e-07,
"ram": 3.953684857628131e-08,
"gpu": 0.0,
"total": 9.855635742568013e-07
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1014647.8889036509
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 15,
"total": 0.3962915740001449,
"mean": 0.026419438266676327,
"stdev": 0.0017088151327039633,
"p50": 0.02755766500001755,
"p90": 0.028095463600027414,
"p95": 0.028245171300022773,
"p99": 0.02846456305999709,
"values": [
0.02851941099999067,
0.02791238700001486,
0.028127640000036536,
0.02784822700004952,
0.027984182000011515,
0.028044625000006818,
0.028047199000013734,
0.02755766500001755,
0.024665465999987646,
0.024277380999990328,
0.024797847000002093,
0.024506461000044055,
0.02466908400003831,
0.024730457999964983,
0.024603540999976303
]
},
"throughput": {
"unit": "tokens/s",
"value": 37.85091832407851
},
"energy": null,
"efficiency": null
}
}
}