Column types: machine (string, 4 classes), hardware (string, 2 classes), subsets (string, 2 classes), backends (string, 1 class), model (string, 47 classes), success (bool), traceback (string, 0–7.23k chars), last_updated (string, 26 chars), run_id (string, 12 classes), run_start_time (string, 12 classes)

machine | hardware | subsets | backends | model | success | traceback | last_updated | run_id | run_start_time |
---|---|---|---|---|---|---|---|---|---|
1xA10 | cuda | torchao | pytorch | unsloth/llama-3-8b-Instruct-bnb-4bit | true | | 2025-02-06T10:40:05.018385 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | unsloth/llama-3-8b-Instruct-bnb-4bit | true | | 2025-02-06T10:40:18.994851 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | unsloth/llama-3-8b-Instruct-bnb-4bit | true | | 2025-02-06T10:42:04.386430 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xA10 | cuda | torchao | pytorch | beomi/Llama-3-Open-Ko-8B | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f10f0237b50>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:42:31.974570 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
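Note: every `success = false` row in this table fails the same way. Transformers rebuilds a torchao quantization config from the serialized dict and forwards any unrecognized key, here `quant_method`, to the torchao API `int4_weight_only()`, which only accepts `group_size`, `layout`, `use_hqq`, and `zero_point_domain`. Below is a minimal, hedged sketch of that mismatch, assuming a transformers release that exposes `TorchAoConfig` and behaves like the version in these runs, with torchao installed; the config dict is illustrative, not the exact dict optimum-benchmark builds.

```python
# Hedged sketch of the ValueError in the tracebacks above (assumptions: a transformers
# release exposing TorchAoConfig, torchao installed; the dict keys are illustrative).
from transformers import TorchAoConfig

config_dict = {
    "quant_type": "int4_weight_only",
    "quant_method": "torchao",  # extra key that int4_weight_only() does not accept
    "group_size": 128,
}

try:
    # from_dict() calls cls(**config_dict); post_init() then validates the extra kwargs
    # against the signature of the mapped torchao function (int4_weight_only here).
    TorchAoConfig.from_dict(config_dict)
except ValueError as err:
    print(err)
    # Expected in the affected versions: "Unexpected keyword arg: quant_method for API:
    # <function int4_weight_only ...>, accepted keyword args are:
    # ['group_size', 'layout', 'use_hqq', 'zero_point_domain']"
```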
1xA10 | cuda | torchao | pytorch | beomi/Llama-3-Open-Ko-8B | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fde08302050>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:42:57.923617 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xA10 | cuda | torchao | pytorch | Gustavosta/MagicPrompt-Stable-Diffusion | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f2be95068c0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:43:24.075615 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | unsloth/llama-3-8b-Instruct-bnb-4bit | true | | 2025-02-06T10:43:30.995029 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | Gustavosta/MagicPrompt-Stable-Diffusion | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f1055450c10>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:43:49.493768 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | beomi/Llama-3-Open-Ko-8B | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f9dc4933b50>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:43:59.177249 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | mistralai/Mistral-7B-Instruct-v0.3 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fac2067c5e0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:44:18.052297 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | beomi/Llama-3-Open-Ko-8B | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fb46cb5e050>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:44:27.985055 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | mistralai/Mistral-7B-Instruct-v0.3 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f5b4052e830>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:44:45.608377 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | Gustavosta/MagicPrompt-Stable-Diffusion | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f5387fe28c0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:44:56.085599 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | TinyLlama/TinyLlama-1.1B-Chat-v1.0 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f0328189240>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:45:11.942536 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | Gustavosta/MagicPrompt-Stable-Diffusion | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fe321a48c10>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:45:23.817755 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | TinyLlama/TinyLlama-1.1B-Chat-v1.0 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f9b04527520>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:45:37.462225 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | mistralai/Mistral-7B-Instruct-v0.3 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f8441f9c5e0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:45:51.573467 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | microsoft/Phi-3.5-vision-instruct | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f38f8196cb0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:46:04.410429 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | mistralai/Mistral-7B-Instruct-v0.3 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fcab8122830>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:46:20.227424 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | microsoft/Phi-3.5-vision-instruct | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f7cf84d9000>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:46:32.704964 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | TinyLlama/TinyLlama-1.1B-Chat-v1.0 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fbc91b41240>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:46:51.310842 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | google/gemma-2b | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f3360f5fd90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:47:03.890796 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | TinyLlama/TinyLlama-1.1B-Chat-v1.0 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fa040817520>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:47:20.802896 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | google/gemma-2b | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fad2857e3b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:47:33.915533 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | microsoft/Phi-3.5-vision-instruct | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f50d87aacb0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:47:49.735965 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | Qwen/Qwen2-7B-Instruct | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f75607b6e60>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:48:04.590062 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | microsoft/Phi-3.5-vision-instruct | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fbd08ebd000>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:48:19.442803 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | Qwen/Qwen2-7B-Instruct | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f45b02cd3f0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:48:33.841091 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | google/gemma-2b | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f58e83ffd90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:48:55.678248 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | microsoft/Florence-2-large | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f4fe18c0040>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:49:05.243211 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | google/gemma-2b | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f92609323b0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:49:27.173776 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | microsoft/Florence-2-large | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fd520d9ea70>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:49:32.863658 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | Qwen/Qwen2-7B-Instruct | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fd1b00e6e60>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:49:55.846678 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: bits for API: <function int4_weight_only at 0x7fd8bdf67d90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:49:59.327011 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xA10 | cuda | torchao | pytorch | hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: bits for API: <function int4_weight_only at 0x7f379878e0e0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:50:25.649621 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xA10 | cuda | torchao | pytorch | mistralai/Mistral-7B-v0.1 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f71c4a04310>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:50:51.873554 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | microsoft/Florence-2-large | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f2bfeb64040>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:50:53.589283 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | mistralai/Mistral-7B-v0.1 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fe2b4d067a0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:51:17.210472 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | microsoft/Florence-2-large | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7ff841662a70>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:51:21.154898 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xT4 | cuda | torchao | pytorch | hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: bits for API: <function int4_weight_only at 0x7ff2ac077d90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:51:49.936533 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | microsoft/Phi-3.5-mini-instruct | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f963432ea70>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:52:14.618016 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: bits for API: <function int4_weight_only at 0x7f3af021a0e0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:52:17.742028 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | ghunkins/prompt-expansion | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fc954f928c0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:52:40.643027 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | mistralai/Mistral-7B-v0.1 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fd2d5234310>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:52:54.583468 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | ghunkins/prompt-expansion | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f550e9d0dc0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:53:07.516025 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | mistralai/Mistral-7B-v0.1 | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f222860a7a0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:53:21.580759 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | microsoft/DialoGPT-medium | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7faf82a8e8c0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:53:32.994848 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | microsoft/Phi-3.5-mini-instruct | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fb499f701f0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:53:49.631522 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | microsoft/DialoGPT-medium | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7faeb5324dc0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:54:01.443479 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | microsoft/Phi-3.5-mini-instruct | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f2ffc282a70>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:54:16.117817 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f48c3881120>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:54:27.584880 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | ghunkins/prompt-expansion | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f123b47a8c0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:54:43.155549 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fb519887490>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:54:55.519993 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | ghunkins/prompt-expansion | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f30b4c3cdc0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:55:13.509086 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | google/gemma-2-2b-it | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fca844abd90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:55:30.718777 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | microsoft/DialoGPT-medium | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f32af09a8c0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:55:50.584140 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xA10 | cuda | torchao | pytorch | google/gemma-2-2b-it | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f7b5901e440>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:56:20.431419 | fc85a78b-b92a-46d9-b36b-715516040a48 | 2025-02-06T10:12:52.699176 |
1xT4 | cuda | torchao | pytorch | microsoft/DialoGPT-medium | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fde9e73cdc0>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:56:27.402227 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xT4 | cuda | torchao | pytorch | TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7fc708f39120>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:56:54.588372 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xT4 | cuda | torchao | pytorch | TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f6e5d27b490>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:57:22.451675 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xT4 | cuda | torchao | pytorch | google/gemma-2-2b-it | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7f96d0483d90>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:57:51.146950 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
1xT4 | cuda | torchao | pytorch | google/gemma-2-2b-it | false | Traceback (most recent call last):
File "/workspace/llm_perf/common/benchmark_runner.py", line 319, in execute_and_log_benchmark
benchmark_report = Benchmark.launch(benchmark_config)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 51, in launch
report = launcher.launch(worker=Benchmark.run, worker_args=[config])
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 66, in launch
raise ChildProcessError(response["traceback"])
ChildProcessError: Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/launchers/process/launcher.py", line 103, in target
report = worker(*worker_args)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/benchmark/base.py", line 78, in run
report = scenario.run(backend)
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 129, in run
self.run_model_loading_tracking()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/scenarios/inference/scenario.py", line 184, in run_model_loading_tracking
self.backend.load()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 69, in load
self.load_transformers_model()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 142, in load_transformers_model
self.process_quantization_config()
File "/usr/local/lib/python3.10/dist-packages/optimum_benchmark/backends/pytorch/backend.py", line 310, in process_quantization_config
self.quantization_config = AutoQuantizationConfig.from_dict(
File "/usr/local/lib/python3.10/dist-packages/transformers/quantizers/auto.py", line 103, in from_dict
return target_cls.from_dict(quantization_config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 102, in from_dict
config = cls(**config_dict)
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1269, in __init__
self.post_init()
File "/usr/local/lib/python3.10/dist-packages/transformers/utils/quantization_config.py", line 1298, in post_init
raise ValueError(
ValueError: Unexpected keyword arg: quant_method for API: <function int4_weight_only at 0x7ff7a4432440>, accepted keyword args are: ['group_size', 'layout', 'use_hqq', 'zero_point_domain']
| 2025-02-06T10:58:18.741711 | bd27598f-6f94-4e3d-9747-85c8ce835edd | 2025-02-06T10:09:56.868153 |
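Every failed torchao row above reports the same ValueError: the serialized quantization config still carries a `quant_method` key when it reaches torchao's `int4_weight_only` helper, which only accepts `group_size`, `layout`, `use_hqq`, and `zero_point_domain`. As a minimal sketch of the intended configuration (assuming the `transformers.TorchAoConfig` API and the model IDs named in the rows above; the exact benchmark wiring in optimum-benchmark may differ), the extra key is avoided by passing only the kwargs the torchao function accepts:

```python
# Minimal sketch, not the benchmark's own code path: construct a torchao int4
# weight-only config directly, forwarding only kwargs that int4_weight_only accepts.
from transformers import AutoModelForCausalLM, TorchAoConfig

# "int4_weight_only" selects torchao's int4_weight_only API; group_size is one of
# its accepted kwargs. A stray "quant_method" entry must not be forwarded here,
# or post_init raises the ValueError recorded in the rows above.
quant_config = TorchAoConfig("int4_weight_only", group_size=128)

model = AutoModelForCausalLM.from_pretrained(
    "TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T",  # any model from the table
    device_map="auto",
    quantization_config=quant_config,
)
```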