meg (HF staff) committed
Commit 0a48778
Parent: 4de2c83

Upload folder using huggingface_hub

runs/text_generation/a100-large/google/gemma-2-9b-it/2024-10-31-01-46-53/.hydra/config.yaml ADDED
@@ -0,0 +1,96 @@
+ backend:
+   name: pytorch
+   version: 2.4.0
+   _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+   task: text-generation
+   model: google/gemma-2-9b-it
+   processor: google/gemma-2-9b-it
+   library: null
+   device: cuda
+   device_ids: '0'
+   seed: 42
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   hub_kwargs: {}
+   no_weights: true
+   device_map: null
+   torch_dtype: null
+   amp_autocast: false
+   amp_dtype: null
+   eval_mode: true
+   to_bettertransformer: false
+   low_cpu_mem_usage: null
+   attn_implementation: null
+   cache_implementation: null
+   torch_compile: false
+   torch_compile_config: {}
+   quantization_scheme: null
+   quantization_config: {}
+   deepspeed_inference: false
+   deepspeed_inference_config: {}
+   peft_type: null
+   peft_config: {}
+ launcher:
+   name: process
+   _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+   device_isolation: false
+   device_isolation_action: warn
+   start_method: spawn
+ benchmark:
+   name: energy_star
+   _target_: optimum_benchmark.benchmarks.energy_star.benchmark.EnergyStarBenchmark
+   dataset_name: EnergyStarAI/text_generation
+   dataset_config: ''
+   dataset_split: train
+   num_samples: 1000
+   input_shapes:
+     batch_size: 1
+   text_column_name: text
+   truncation: true
+   max_length: -1
+   dataset_prefix1: ''
+   dataset_prefix2: ''
+   t5_task: ''
+   image_column_name: image
+   resize: false
+   question_column_name: question
+   context_column_name: context
+   sentence1_column_name: sentence1
+   sentence2_column_name: sentence2
+   audio_column_name: audio
+   iterations: 10
+   warmup_runs: 10
+   energy: true
+   forward_kwargs: {}
+   generate_kwargs:
+     max_new_tokens: 10
+     min_new_tokens: 10
+   call_kwargs: {}
+ experiment_name: text_generation
+ environment:
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204529.905664
+   system: Linux
+   machine: x86_64
+   platform: Linux-5.10.223-212.873.amzn2.x86_64-x86_64-with-glibc2.35
+   processor: x86_64
+   python_version: 3.9.20
+   gpu:
+   - NVIDIA A100-SXM4-80GB
+   gpu_count: 1
+   gpu_vram_mb: 85899345920
+   optimum_benchmark_version: 0.2.0
+   optimum_benchmark_commit: null
+   transformers_version: 4.44.0
+   transformers_commit: null
+   accelerate_version: 0.33.0
+   accelerate_commit: null
+   diffusers_version: 0.30.0
+   diffusers_commit: null
+   optimum_version: null
+   optimum_commit: null
+   timm_version: null
+   timm_commit: null
+   peft_version: null
+   peft_commit: null
runs/text_generation/a100-large/google/gemma-2-9b-it/2024-10-31-01-46-53/.hydra/hydra.yaml ADDED
@@ -0,0 +1,175 @@
+ hydra:
+   run:
+     dir: /runs/text_generation/a100-large/google/gemma-2-9b-it/2024-10-31-01-46-53
+   sweep:
+     dir: sweeps/${experiment_name}/${backend.model}/${now:%Y-%m-%d-%H-%M-%S}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: RUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.run.dir=/runs/text_generation/a100-large/google/gemma-2-9b-it/2024-10-31-01-46-53
+     - hydra.mode=RUN
+     task:
+     - backend.model=google/gemma-2-9b-it
+     - backend.processor=google/gemma-2-9b-it
+   job:
+     name: cli
+     chdir: true
+     override_dirname: backend.model=google/gemma-2-9b-it,backend.processor=google/gemma-2-9b-it
+     id: ???
+     num: ???
+     config_name: text_generation
+     env_set:
+       OVERRIDE_BENCHMARKS: '1'
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: optimum_benchmark
+       schema: pkg
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: /optimum-benchmark/examples/energy_star
+       schema: file
+       provider: command-line
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /runs/text_generation/a100-large/google/gemma-2-9b-it/2024-10-31-01-46-53
+     choices:
+       benchmark: energy_star
+       launcher: process
+       backend: pytorch
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
runs/text_generation/a100-large/google/gemma-2-9b-it/2024-10-31-01-46-53/.hydra/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - backend.model=google/gemma-2-9b-it
+ - backend.processor=google/gemma-2-9b-it
runs/text_generation/a100-large/google/gemma-2-9b-it/2024-10-31-01-46-53/cli.log ADDED
@@ -0,0 +1,7 @@
+ [2024-10-31 01:46:56,483][launcher][INFO] - Allocating process launcher
+ [2024-10-31 01:46:56,483][process][INFO] - + Setting multiprocessing start method to spawn.
+ [2024-10-31 01:46:56,501][process][INFO] - + Launched benchmark in isolated process 1730.
+ [PROC-0][2024-10-31 01:46:59,507][datasets][INFO] - PyTorch version 2.4.0 available.
+ [PROC-0][2024-10-31 01:47:00,510][backend][INFO] - Allocating pytorch backend
+ [PROC-0][2024-10-31 01:47:00,510][backend][INFO] - + Setting random seed to 42
+ [2024-10-31 01:47:01,438][experiment][ERROR] - Error during experiment
runs/text_generation/a100-large/google/gemma-2-9b-it/2024-10-31-01-46-53/error.log ADDED
@@ -0,0 +1,86 @@
+ Error executing job with overrides: ['backend.model=google/gemma-2-9b-it', 'backend.processor=google/gemma-2-9b-it']
+ Traceback (most recent call last):
+   File "/optimum-benchmark/optimum_benchmark/cli.py", line 65, in benchmark_cli
+     benchmark_report: BenchmarkReport = launch(experiment_config=experiment_config)
+   File "/optimum-benchmark/optimum_benchmark/experiment.py", line 102, in launch
+     raise error
+   File "/optimum-benchmark/optimum_benchmark/experiment.py", line 90, in launch
+     report = launcher.launch(run, experiment_config.benchmark, experiment_config.backend)
+   File "/optimum-benchmark/optimum_benchmark/launchers/process/launcher.py", line 47, in launch
+     while not process_context.join():
+   File "/opt/conda/lib/python3.9/site-packages/torch/multiprocessing/spawn.py", line 189, in join
+     raise ProcessRaisedException(msg, error_index, failed_process.pid)
+ torch.multiprocessing.spawn.ProcessRaisedException:
+
+ -- Process 0 terminated with the following error:
+ Traceback (most recent call last):
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/utils/_errors.py", line 304, in hf_raise_for_status
+     response.raise_for_status()
+   File "/opt/conda/lib/python3.9/site-packages/requests/models.py", line 1024, in raise_for_status
+     raise HTTPError(http_error_msg, response=self)
+ requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2-9b-it/resolve/main/config.json
+
+ The above exception was the direct cause of the following exception:
+
+ Traceback (most recent call last):
+   File "/opt/conda/lib/python3.9/site-packages/transformers/utils/hub.py", line 402, in cached_file
+     resolved_file = hf_hub_download(
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/utils/_deprecation.py", line 101, in inner_f
+     return f(*args, **kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
+     return fn(*args, **kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/file_download.py", line 1240, in hf_hub_download
+     return _hf_hub_download_to_cache_dir(
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/file_download.py", line 1347, in _hf_hub_download_to_cache_dir
+     _raise_on_head_call_error(head_call_error, force_download, local_files_only)
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/file_download.py", line 1854, in _raise_on_head_call_error
+     raise head_call_error
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/file_download.py", line 1751, in _get_metadata_or_catch_error
+     metadata = get_hf_file_metadata(
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
+     return fn(*args, **kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/file_download.py", line 1673, in get_hf_file_metadata
+     r = _request_wrapper(
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/file_download.py", line 376, in _request_wrapper
+     response = _request_wrapper(
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/file_download.py", line 400, in _request_wrapper
+     hf_raise_for_status(response)
+   File "/opt/conda/lib/python3.9/site-packages/huggingface_hub/utils/_errors.py", line 321, in hf_raise_for_status
+     raise GatedRepoError(message, response) from e
+ huggingface_hub.utils._errors.GatedRepoError: 403 Client Error. (Request ID: Root=1-6722e194-4007a0517a59c5f35f49c3c4;79e10e4d-6496-4a2c-8370-32e5f7ed6e79)
+
+ Cannot access gated repo for url https://huggingface.co/google/gemma-2-9b-it/resolve/main/config.json.
+ Your request to access model google/gemma-2-9b-it is awaiting a review from the repo authors.
+
+ The above exception was the direct cause of the following exception:
+
+ Traceback (most recent call last):
+   File "/opt/conda/lib/python3.9/site-packages/torch/multiprocessing/spawn.py", line 76, in _wrap
+     fn(i, *args)
+   File "/optimum-benchmark/optimum_benchmark/launchers/process/launcher.py", line 63, in entrypoint
+     worker_output = worker(*worker_args)
+   File "/optimum-benchmark/optimum_benchmark/experiment.py", line 55, in run
+     backend: Backend = backend_factory(backend_config)
+   File "/optimum-benchmark/optimum_benchmark/backends/pytorch/backend.py", line 45, in __init__
+     super().__init__(config)
+   File "/optimum-benchmark/optimum_benchmark/backends/base.py", line 65, in __init__
+     self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.hub_kwargs)
+   File "/optimum-benchmark/optimum_benchmark/backends/transformers_utils.py", line 27, in get_transformers_pretrained_config
+     return AutoConfig.from_pretrained(model, **kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/transformers/models/auto/configuration_auto.py", line 976, in from_pretrained
+     config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+     config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+     resolved_config_file = cached_file(
+   File "/opt/conda/lib/python3.9/site-packages/transformers/utils/hub.py", line 420, in cached_file
+     raise EnvironmentError(
+ OSError: You are trying to access a gated repo.
+ Make sure to have access to it at https://huggingface.co/google/gemma-2-9b-it.
+ 403 Client Error. (Request ID: Root=1-6722e194-4007a0517a59c5f35f49c3c4;79e10e4d-6496-4a2c-8370-32e5f7ed6e79)
+
+ Cannot access gated repo for url https://huggingface.co/google/gemma-2-9b-it/resolve/main/config.json.
+ Your request to access model google/gemma-2-9b-it is awaiting a review from the repo authors.
+
+
+ Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
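
Note: the root cause above is a GatedRepoError; the run could not fetch config.json because access to google/gemma-2-9b-it was still awaiting review by the repo authors. A minimal sketch of the usual remedy once access is granted; the token value is a placeholder:

from huggingface_hub import login
from transformers import AutoConfig

# Authenticate so Hub downloads can resolve files from the gated repo;
# alternatively, set the HF_TOKEN environment variable before launching.
login(token="hf_...")  # placeholder token, not a real credential

# The call that failed in the traceback above should then succeed.
config = AutoConfig.from_pretrained("google/gemma-2-9b-it")
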
runs/text_generation/a100-large/google/gemma-2-9b-it/2024-10-31-01-46-53/experiment_config.json ADDED
@@ -0,0 +1,110 @@
+ {
+     "experiment_name": "text_generation",
+     "backend": {
+         "name": "pytorch",
+         "version": "2.4.0",
+         "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
+         "task": "text-generation",
+         "model": "google/gemma-2-9b-it",
+         "processor": "google/gemma-2-9b-it",
+         "library": "transformers",
+         "device": "cuda",
+         "device_ids": "0",
+         "seed": 42,
+         "inter_op_num_threads": null,
+         "intra_op_num_threads": null,
+         "hub_kwargs": {
+             "revision": "main",
+             "force_download": false,
+             "local_files_only": false,
+             "trust_remote_code": true
+         },
+         "no_weights": true,
+         "device_map": null,
+         "torch_dtype": null,
+         "amp_autocast": false,
+         "amp_dtype": null,
+         "eval_mode": true,
+         "to_bettertransformer": false,
+         "low_cpu_mem_usage": null,
+         "attn_implementation": null,
+         "cache_implementation": null,
+         "torch_compile": false,
+         "torch_compile_config": {},
+         "quantization_scheme": null,
+         "quantization_config": {},
+         "deepspeed_inference": false,
+         "deepspeed_inference_config": {},
+         "peft_type": null,
+         "peft_config": {}
+     },
+     "launcher": {
+         "name": "process",
+         "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
+         "device_isolation": false,
+         "device_isolation_action": "warn",
+         "start_method": "spawn"
+     },
+     "benchmark": {
+         "name": "energy_star",
+         "_target_": "optimum_benchmark.benchmarks.energy_star.benchmark.EnergyStarBenchmark",
+         "dataset_name": "EnergyStarAI/text_generation",
+         "dataset_config": "",
+         "dataset_split": "train",
+         "num_samples": 1000,
+         "input_shapes": {
+             "batch_size": 1
+         },
+         "text_column_name": "text",
+         "truncation": true,
+         "max_length": -1,
+         "dataset_prefix1": "",
+         "dataset_prefix2": "",
+         "t5_task": "",
+         "image_column_name": "image",
+         "resize": false,
+         "question_column_name": "question",
+         "context_column_name": "context",
+         "sentence1_column_name": "sentence1",
+         "sentence2_column_name": "sentence2",
+         "audio_column_name": "audio",
+         "iterations": 10,
+         "warmup_runs": 10,
+         "energy": true,
+         "forward_kwargs": {},
+         "generate_kwargs": {
+             "max_new_tokens": 10,
+             "min_new_tokens": 10
+         },
+         "call_kwargs": {}
+     },
+     "environment": {
+         "cpu": " Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz",
+         "cpu_count": 96,
+         "cpu_ram_mb": 1204529.905664,
+         "system": "Linux",
+         "machine": "x86_64",
+         "platform": "Linux-5.10.223-212.873.amzn2.x86_64-x86_64-with-glibc2.35",
+         "processor": "x86_64",
+         "python_version": "3.9.20",
+         "gpu": [
+             "NVIDIA A100-SXM4-80GB"
+         ],
+         "gpu_count": 1,
+         "gpu_vram_mb": 85899345920,
+         "optimum_benchmark_version": "0.2.0",
+         "optimum_benchmark_commit": null,
+         "transformers_version": "4.44.0",
+         "transformers_commit": null,
+         "accelerate_version": "0.33.0",
+         "accelerate_commit": null,
+         "diffusers_version": "0.30.0",
+         "diffusers_commit": null,
+         "optimum_version": null,
+         "optimum_commit": null,
+         "timm_version": null,
+         "timm_commit": null,
+         "peft_version": null,
+         "peft_commit": null
+     }
+ }