sasha (HF staff) committed on
Commit
e3922cc
1 Parent(s): d2b016c

Upload folder using huggingface_hub

text_generation/google/gemma-2-2b/2024-10-24-19-05-37/.hydra/config.yaml ADDED
@@ -0,0 +1,96 @@
+ backend:
+   name: pytorch
+   version: 2.4.0
+   _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+   task: text-generation
+   model: google/gemma-2-2b
+   processor: google/gemma-2-2b
+   library: null
+   device: cuda
+   device_ids: '0'
+   seed: 42
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   hub_kwargs: {}
+   no_weights: true
+   device_map: null
+   torch_dtype: null
+   amp_autocast: false
+   amp_dtype: null
+   eval_mode: true
+   to_bettertransformer: false
+   low_cpu_mem_usage: null
+   attn_implementation: null
+   cache_implementation: null
+   torch_compile: false
+   torch_compile_config: {}
+   quantization_scheme: null
+   quantization_config: {}
+   deepspeed_inference: false
+   deepspeed_inference_config: {}
+   peft_type: null
+   peft_config: {}
+ launcher:
+   name: process
+   _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+   device_isolation: false
+   device_isolation_action: warn
+   start_method: spawn
+ benchmark:
+   name: energy_star
+   _target_: optimum_benchmark.benchmarks.energy_star.benchmark.EnergyStarBenchmark
+   dataset_name: EnergyStarAI/text_generation
+   dataset_config: ''
+   dataset_split: train
+   num_samples: 1000
+   input_shapes:
+     batch_size: 1
+   text_column_name: text
+   truncation: true
+   max_length: -1
+   dataset_prefix1: ''
+   dataset_prefix2: ''
+   t5_task: ''
+   image_column_name: image
+   resize: false
+   question_column_name: question
+   context_column_name: context
+   sentence1_column_name: sentence1
+   sentence2_column_name: sentence2
+   audio_column_name: audio
+   iterations: 10
+   warmup_runs: 10
+   energy: true
+   forward_kwargs: {}
+   generate_kwargs:
+     max_new_tokens: 10
+     min_new_tokens: 10
+   call_kwargs: {}
+ experiment_name: text_generation
+ environment:
+   cpu: ' AMD EPYC 7R32'
+   cpu_count: 48
+   cpu_ram_mb: 200472.73984
+   system: Linux
+   machine: x86_64
+   platform: Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35
+   processor: x86_64
+   python_version: 3.9.20
+   gpu:
+   - NVIDIA A10G
+   gpu_count: 1
+   gpu_vram_mb: 24146608128
+   optimum_benchmark_version: 0.2.0
+   optimum_benchmark_commit: null
+   transformers_version: 4.44.0
+   transformers_commit: null
+   accelerate_version: 0.33.0
+   accelerate_commit: null
+   diffusers_version: 0.30.0
+   diffusers_commit: null
+   optimum_version: null
+   optimum_commit: null
+   timm_version: null
+   timm_commit: null
+   peft_version: null
+   peft_commit: null
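
The `generate_kwargs` block above pins both `max_new_tokens` and `min_new_tokens` to 10, so every measured decode step produces exactly ten tokens regardless of the prompt. A minimal sketch of the equivalent transformers call (model name and kwargs taken from the config; the prompt is illustrative):

```python
# Sketch: fixed-length generation matching generate_kwargs in the config above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b").to("cuda").eval()

inputs = tokenizer("Hello, world", return_tensors="pt").to("cuda")
with torch.no_grad():
    # Exactly 10 new tokens per sample, as in the benchmark config.
    out = model.generate(**inputs, max_new_tokens=10, min_new_tokens=10)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```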
text_generation/google/gemma-2-2b/2024-10-24-19-05-37/.hydra/hydra.yaml ADDED
@@ -0,0 +1,175 @@
+ hydra:
+   run:
+     dir: ./runs/text_generation/google/gemma-2-2b/2024-10-24-19-05-37
+   sweep:
+     dir: sweeps/${experiment_name}/${backend.model}/${now:%Y-%m-%d-%H-%M-%S}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: RUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.run.dir=./runs/text_generation/google/gemma-2-2b/2024-10-24-19-05-37
+     - hydra.mode=RUN
+     task:
+     - backend.model=google/gemma-2-2b
+     - backend.processor=google/gemma-2-2b
+   job:
+     name: cli
+     chdir: true
+     override_dirname: backend.model=google/gemma-2-2b,backend.processor=google/gemma-2-2b
+     id: ???
+     num: ???
+     config_name: text_generation
+     env_set:
+       OVERRIDE_BENCHMARKS: '1'
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: optimum_benchmark
+       schema: pkg
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: /optimum-benchmark/examples/energy_star
+       schema: file
+       provider: command-line
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /runs/text_generation/google/gemma-2-2b/2024-10-24-19-05-37
+     choices:
+       benchmark: energy_star
+       launcher: process
+       backend: pytorch
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
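
The runtime block above records Hydra 1.3.2, the config name `text_generation`, and the two task overrides, with the config directory coming from the command-line source in `config_sources`. A sketch of recomposing the same configuration with Hydra's compose API, assuming those same paths and names:

```python
# Sketch: recompose this run's config with Hydra's compose API.
# Config dir, config name, and overrides are taken from the hydra.yaml above.
from hydra import compose, initialize_config_dir
from omegaconf import OmegaConf

with initialize_config_dir(
    config_dir="/optimum-benchmark/examples/energy_star", version_base="1.3"
):
    cfg = compose(
        config_name="text_generation",
        overrides=[
            "backend.model=google/gemma-2-2b",
            "backend.processor=google/gemma-2-2b",
        ],
    )
print(OmegaConf.to_yaml(cfg))  # should match .hydra/config.yaml above
```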
text_generation/google/gemma-2-2b/2024-10-24-19-05-37/.hydra/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - backend.model=google/gemma-2-2b
+ - backend.processor=google/gemma-2-2b
text_generation/google/gemma-2-2b/2024-10-24-19-05-37/cli.log ADDED
@@ -0,0 +1,31 @@
+ [2024-10-24 19:05:40,348][launcher][INFO] - Allocating process launcher
+ [2024-10-24 19:05:40,349][process][INFO] - + Setting multiprocessing start method to spawn.
+ [2024-10-24 19:05:40,359][process][INFO] - + Launched benchmark in isolated process 180.
+ [PROC-0][2024-10-24 19:05:42,915][datasets][INFO] - PyTorch version 2.4.0 available.
+ [PROC-0][2024-10-24 19:05:43,814][backend][INFO] - Allocating pytorch backend
+ [PROC-0][2024-10-24 19:05:43,814][backend][INFO] - + Setting random seed to 42
+ [PROC-0][2024-10-24 19:05:45,769][pytorch][INFO] - + Using AutoModel class AutoModelForCausalLM
+ [PROC-0][2024-10-24 19:05:45,769][pytorch][INFO] - + Creating backend temporary directory
+ [PROC-0][2024-10-24 19:05:45,769][pytorch][INFO] - + Loading model with random weights
+ [PROC-0][2024-10-24 19:05:45,769][pytorch][INFO] - + Creating no weights model
+ [PROC-0][2024-10-24 19:05:45,769][pytorch][INFO] - + Creating no weights model directory
+ [PROC-0][2024-10-24 19:05:45,769][pytorch][INFO] - + Creating no weights model state dict
+ [PROC-0][2024-10-24 19:05:45,793][pytorch][INFO] - + Saving no weights model safetensors
+ [PROC-0][2024-10-24 19:05:45,793][pytorch][INFO] - + Saving no weights model pretrained config
+ [PROC-0][2024-10-24 19:05:45,794][pytorch][INFO] - + Loading no weights AutoModel
+ [PROC-0][2024-10-24 19:05:45,794][pytorch][INFO] - + Loading model directly on device: cuda
+ [PROC-0][2024-10-24 19:05:46,061][pytorch][INFO] - + Turning on model's eval mode
+ [PROC-0][2024-10-24 19:05:46,067][benchmark][INFO] - Allocating energy_star benchmark
+ [PROC-0][2024-10-24 19:05:46,068][energy_star][INFO] - + Loading raw dataset
+ [PROC-0][2024-10-24 19:05:47,074][energy_star][INFO] - + Updating Text Generation kwargs with default values
+ [PROC-0][2024-10-24 19:05:47,074][energy_star][INFO] - + Initializing Text Generation report
+ [PROC-0][2024-10-24 19:05:47,074][energy][INFO] - + Tracking GPU energy on devices [0]
+ [PROC-0][2024-10-24 19:05:51,277][energy_star][INFO] - + Preprocessing dataset
+ [PROC-0][2024-10-24 19:05:52,517][energy][INFO] - + Saving codecarbon emission data to preprocess_codecarbon.json
+ [PROC-0][2024-10-24 19:05:52,517][energy_star][INFO] - + Preparing backend for Inference
+ [PROC-0][2024-10-24 19:05:52,517][energy_star][INFO] - + Initialising dataloader
+ [PROC-0][2024-10-24 19:05:52,517][energy_star][INFO] - + Warming up backend for Inference
+ [PROC-0][2024-10-24 19:05:54,536][energy_star][INFO] - + Additional warmup for Text Generation
+ [PROC-0][2024-10-24 19:05:54,956][energy_star][INFO] - + Running Text Generation energy tracking for 10 iterations
+ [PROC-0][2024-10-24 19:05:54,956][energy_star][INFO] - + Prefill iteration 1/10
+ [2024-10-24 19:06:10,217][experiment][ERROR] - Error during experiment
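
The `no_weights: true` path in the log skips downloading the checkpoint and instantiates the model with randomly initialized weights, which is enough for timing and energy measurement. A rough sketch of that idea with plain transformers (not optimum-benchmark's exact implementation, which also writes an empty safetensors file to a temporary directory, as the log shows):

```python
# Sketch: instantiate gemma-2-2b with random weights, no checkpoint download,
# roughly mirroring the "no weights model" steps in the log above.
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("google/gemma-2-2b")  # config only, no weights
model = AutoModelForCausalLM.from_config(config)          # random initialization
model = model.to("cuda").eval()  # "Loading model directly on device" + eval mode
```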
text_generation/google/gemma-2-2b/2024-10-24-19-05-37/error.log ADDED
@@ -0,0 +1,90 @@
+
+
+
+
+
+ [codecarbon INFO @ 19:05:47] [setup] RAM Tracking...
+ [codecarbon INFO @ 19:05:47] [setup] GPU Tracking...
+ [codecarbon INFO @ 19:05:47] Tracking Nvidia GPU via pynvml
+ [codecarbon DEBUG @ 19:05:47] GPU available. Starting setup
+ [codecarbon INFO @ 19:05:47] [setup] CPU Tracking...
+ [codecarbon DEBUG @ 19:05:47] Not using PowerGadget, an exception occurred while instantiating IntelPowerGadget : Platform not supported by Intel Power Gadget
+ [codecarbon DEBUG @ 19:05:47] Not using the RAPL interface, an exception occurred while instantiating IntelRAPL : Intel RAPL files not found at /sys/class/powercap/intel-rapl on linux
+ [codecarbon DEBUG @ 19:05:47] Not using PowerMetrics, an exception occurred while instantiating Powermetrics : Platform not supported by Powermetrics
+ [codecarbon WARNING @ 19:05:47] No CPU tracking mode found. Falling back on CPU constant mode.
+ [codecarbon WARNING @ 19:05:48] We saw that you have a AMD EPYC 7R32 but we don't know it. Please contact us.
+ [codecarbon INFO @ 19:05:48] CPU Model on constant consumption mode: AMD EPYC 7R32
+ [codecarbon INFO @ 19:05:48] >>> Tracker's metadata:
+ [codecarbon INFO @ 19:05:48] Platform system: Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35
+ [codecarbon INFO @ 19:05:48] Python version: 3.9.20
+ [codecarbon INFO @ 19:05:48] CodeCarbon version: 2.5.1
+ [codecarbon INFO @ 19:05:48] Available RAM : 186.705 GB
+ [codecarbon INFO @ 19:05:48] CPU count: 48
+ [codecarbon INFO @ 19:05:48] CPU model: AMD EPYC 7R32
+ [codecarbon INFO @ 19:05:48] GPU count: 1
+ [codecarbon INFO @ 19:05:48] GPU model: 1 x NVIDIA A10G
+ [codecarbon DEBUG @ 19:05:49] Not running on AWS
+ [codecarbon DEBUG @ 19:05:50] Not running on Azure
+ [codecarbon DEBUG @ 19:05:51] Not running on GCP
+ [codecarbon INFO @ 19:05:51] Saving emissions data to file /runs/text_generation/google/gemma-2-2b/2024-10-24-19-05-37/codecarbon.csv
+ [codecarbon DEBUG @ 19:05:51] EmissionsData(timestamp='2024-10-24T19:05:51', project_name='codecarbon', run_id='af8cf9cd-3ff9-45b1-87f5-8bd44c5df4f0', duration=0.0021458529954543337, emissions=0.0, emissions_rate=0.0, cpu_power=0.0, gpu_power=0.0, ram_power=0.0, cpu_energy=0, gpu_energy=0, ram_energy=0, energy_consumed=0, country_name='United States', country_iso_code='USA', region='virginia', cloud_provider='', cloud_region='', os='Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35', python_version='3.9.20', codecarbon_version='2.5.1', cpu_count=48, cpu_model='AMD EPYC 7R32', gpu_count=1, gpu_model='1 x NVIDIA A10G', longitude=-77.4903, latitude=39.0469, ram_total_size=186.7047882080078, tracking_mode='process', on_cloud='N', pue=1.0)
+
+
+ [codecarbon INFO @ 19:05:52] Energy consumed for RAM : 0.000000 kWh. RAM Power : 0.36616086959838867 W
+ [codecarbon DEBUG @ 19:05:52] RAM : 0.37 W during 1.24 s [measurement time: 0.0005]
+ [codecarbon INFO @ 19:05:52] Energy consumed for all GPUs : 0.000024 kWh. Total GPU Power : 71.15502348245434 W
+ [codecarbon DEBUG @ 19:05:52] GPU : 71.16 W during 1.24 s [measurement time: 0.0023]
+ [codecarbon INFO @ 19:05:52] Energy consumed for all CPUs : 0.000015 kWh. Total CPU Power : 42.5 W
+ [codecarbon DEBUG @ 19:05:52] CPU : 42.50 W during 1.24 s [measurement time: 0.0000]
+ [codecarbon INFO @ 19:05:52] 0.000039 kWh of electricity used since the beginning.
+ [codecarbon DEBUG @ 19:05:52] last_duration=1.2355880289978813
+ ------------------------
+ [codecarbon DEBUG @ 19:05:52] EmissionsData(timestamp='2024-10-24T19:05:52', project_name='codecarbon', run_id='af8cf9cd-3ff9-45b1-87f5-8bd44c5df4f0', duration=1.238806013003341, emissions=1.4465120447672696e-05, emissions_rate=1.1676663089972978e-05, cpu_power=42.5, gpu_power=71.15502348245434, ram_power=0.36616086959838867, cpu_energy=1.4623404627743892e-05, gpu_energy=2.4437519550057907e-05, ram_energy=1.2567374983479889e-07, energy_consumed=3.9186597927636596e-05, country_name='United States', country_iso_code='USA', region='virginia', cloud_provider='', cloud_region='', os='Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35', python_version='3.9.20', codecarbon_version='2.5.1', cpu_count=48, cpu_model='AMD EPYC 7R32', gpu_count=1, gpu_model='1 x NVIDIA A10G', longitude=-77.4903, latitude=39.0469, ram_total_size=186.7047882080078, tracking_mode='process', on_cloud='N', pue=1.0)
+ [codecarbon DEBUG @ 19:05:54] EmissionsData(timestamp='2024-10-24T19:05:54', project_name='codecarbon', run_id='af8cf9cd-3ff9-45b1-87f5-8bd44c5df4f0', duration=0.0022436440049204975, emissions=1.4465120447672696e-05, emissions_rate=0.006447154903340051, cpu_power=42.5, gpu_power=71.15502348245434, ram_power=0.36616086959838867, cpu_energy=1.4623404627743892e-05, gpu_energy=2.4437519550057907e-05, ram_energy=1.2567374983479889e-07, energy_consumed=3.9186597927636596e-05, country_name='United States', country_iso_code='USA', region='virginia', cloud_provider='', cloud_region='', os='Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35', python_version='3.9.20', codecarbon_version='2.5.1', cpu_count=48, cpu_model='AMD EPYC 7R32', gpu_count=1, gpu_model='1 x NVIDIA A10G', longitude=-77.4903, latitude=39.0469, ram_total_size=186.7047882080078, tracking_mode='process', on_cloud='N', pue=1.0)
+
+  0%| | 0/1000 [00:00<?, ?it/s]
+  0%| | 2/1000 [00:00<01:11, 13.99it/s]
+  0%| | 4/1000 [00:00<01:08, 14.62it/s]
+  1%| | 6/1000 [00:00<02:30, 6.61it/s]
+  1%| | 8/1000 [00:01<05:21, 3.09it/s]
+  1%| | 10/1000 [00:02<03:56, 4.19it/s]
+  1%| | 11/1000 [00:02<04:47, 3.44it/s]
+  1%|▏ | 13/1000 [00:02<03:35, 4.58it/s]
+  1%|▏ | 14/1000 [00:03<04:04, 4.03it/s]
+  2%|▏ | 15/1000 [00:03<03:50, 4.27it/s]
+  2%|▏ | 17/1000 [00:03<02:45, 5.93it/s]
+  2%|▏ | 19/1000 [00:03<02:17, 7.13it/s]
+  2%|▏ | 21/1000 [00:03<01:56, 8.44it/s]
+  2%|▏ | 23/1000 [00:03<01:42, 9.56it/s]
+  2%|▎ | 25/1000 [00:04<01:32, 10.54it/s]
+  3%|▎ | 27/1000 [00:04<02:04, 7.78it/s]
+  3%|▎ | 29/1000 [00:04<01:45, 9.22it/s]
+  3%|▎ | 31/1000 [00:04<01:39, 9.77it/s]
+  3%|▎ | 33/1000 [00:04<01:30, 10.64it/s]
+  4%|▎ | 35/1000 [00:05<01:33, 10.31it/s]
+  4%|▎ | 37/1000 [00:05<01:32, 10.46it/s]
+  4%|▍ | 39/1000 [00:05<02:02, 7.83it/s]
+  4%|▍ | 41/1000 [00:06<02:26, 6.57it/s]
+  4%|▍ | 43/1000 [00:06<02:04, 7.72it/s]
+  4%|▍ | 45/1000 [00:06<01:46, 8.99it/s]
+  5%|▍ | 47/1000 [00:06<01:43, 9.17it/s]
+  5%|▍ | 49/1000 [00:06<01:34, 10.04it/s]
+  5%|▌ | 51/1000 [00:07<01:51, 8.48it/s]
+  5%|▌ | 52/1000 [00:07<01:48, 8.71it/s]
+  5%|▌ | 53/1000 [00:07<02:10, 7.26it/s]
+  6%|▌ | 55/1000 [00:07<01:49, 8.62it/s]
+  6%|▌ | 56/1000 [00:07<02:07, 7.41it/s]
+  6%|▌ | 58/1000 [00:08<01:46, 8.84it/s]
+  6%|▌ | 59/1000 [00:08<01:48, 8.69it/s]
+  6%|▌ | 60/1000 [00:08<01:55, 8.13it/s]
+  6%|▌ | 61/1000 [00:08<01:55, 8.15it/s]
+  6%|▌ | 62/1000 [00:08<02:16, 6.87it/s]
+  6%|▋ | 64/1000 [00:08<01:49, 8.56it/s]
+  6%|▋ | 65/1000 [00:09<02:19, 6.68it/s]
+  7%|▋ | 66/1000 [00:09<02:20, 6.64it/s]
+  7%|▋ | 67/1000 [00:09<02:09, 7.22it/s]
+  7%|▋ | 68/1000 [00:09<02:04, 7.50it/s]
+  7%|▋ | 70/1000 [00:09<01:53, 8.20it/s]
+  7%|▋ | 71/1000 [00:09<02:40, 5.79it/s]
+  7%|▋ | 73/1000 [00:10<02:04, 7.44it/s]
+  7%|▋ | 74/1000 [00:10<02:15, 6.85it/s]
+  8%|▊ | 76/1000 [00:10<01:52, 8.23it/s]
+  8%|▊ | 78/1000 [00:10<01:34, 9.77it/s]
+  8%|▊ | 80/1000 [00:10<01:49, 8.39it/s]
+  8%|▊ | 81/1000 [00:11<02:08, 7.13it/s]
+  8%|▊ | 82/1000 [00:11<02:01, 7.53it/s]
+  8%|▊ | 84/1000 [00:11<01:46, 8.58it/s]
+  8%|▊ | 85/1000 [00:14<02:36, 5.85it/s]
+ Error executing job with overrides: ['backend.model=google/gemma-2-2b', 'backend.processor=google/gemma-2-2b']
+ Traceback (most recent call last):
+   File "/optimum-benchmark/optimum_benchmark/cli.py", line 65, in benchmark_cli
+     benchmark_report: BenchmarkReport = launch(experiment_config=experiment_config)
+   File "/optimum-benchmark/optimum_benchmark/experiment.py", line 102, in launch
+     raise error
+   File "/optimum-benchmark/optimum_benchmark/experiment.py", line 90, in launch
+     report = launcher.launch(run, experiment_config.benchmark, experiment_config.backend)
+   File "/optimum-benchmark/optimum_benchmark/launchers/process/launcher.py", line 47, in launch
+     while not process_context.join():
+   File "/opt/conda/lib/python3.9/site-packages/torch/multiprocessing/spawn.py", line 189, in join
+     raise ProcessRaisedException(msg, error_index, failed_process.pid)
+ torch.multiprocessing.spawn.ProcessRaisedException:
+
+ -- Process 0 terminated with the following error:
+ Traceback (most recent call last):
+   File "/opt/conda/lib/python3.9/site-packages/torch/multiprocessing/spawn.py", line 76, in _wrap
+     fn(i, *args)
+   File "/optimum-benchmark/optimum_benchmark/launchers/process/launcher.py", line 63, in entrypoint
+     worker_output = worker(*worker_args)
+   File "/optimum-benchmark/optimum_benchmark/experiment.py", line 62, in run
+     benchmark.run(backend)
+   File "/optimum-benchmark/optimum_benchmark/benchmarks/energy_star/benchmark.py", line 174, in run
+     self.run_text_generation_energy_tracking(backend)
+   File "/optimum-benchmark/optimum_benchmark/benchmarks/energy_star/benchmark.py", line 198, in run_text_generation_energy_tracking
+     _ = backend.prefill(inputs, prefill_kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
+     return func(*args, **kwargs)
+   File "/optimum-benchmark/optimum_benchmark/backends/pytorch/backend.py", line 350, in prefill
+     return self.pretrained_model.generate(**inputs, **kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
+     return func(*args, **kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/transformers/generation/utils.py", line 2024, in generate
+     result = self._sample(
+   File "/opt/conda/lib/python3.9/site-packages/transformers/generation/utils.py", line 2982, in _sample
+     outputs = self(**model_inputs, return_dict=True)
+   File "/opt/conda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
+     return self._call_impl(*args, **kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
+     return forward_call(*args, **kwargs)
+   File "/opt/conda/lib/python3.9/site-packages/transformers/models/gemma2/modeling_gemma2.py", line 1015, in forward
+     logits = logits / self.config.final_logit_softcapping
+ torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 6.02 GiB. GPU 0 has a total capacity of 22.19 GiB of which 2.78 GiB is free. Process 139203 has 19.40 GiB memory in use. Of the allocated memory 17.07 GiB is allocated by PyTorch, and 2.04 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+
+
+ Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
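
The run dies in gemma-2's final logit soft-capping, where dividing the full logits tensor tries to allocate 6.02 GiB on top of weights and activations; note that `torch_dtype` is null in this config, so the model loads in full fp32 precision. A sketch of two mitigations suggested by the trace and config themselves (the allocator flag is quoted verbatim from the error message; half-precision loading is an assumption, not what this benchmark configured):

```python
# Sketch: OOM mitigations suggested by the trace and config above.
import os

# 1) Allocator flag from the error message; must be set before CUDA initializes.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import torch
from transformers import AutoModelForCausalLM

# 2) torch_dtype is null in this config, so weights load in fp32; fp16 roughly
#    halves both weight memory and the size of the offending logits tensor.
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-2b", torch_dtype=torch.float16
).to("cuda")
```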
text_generation/google/gemma-2-2b/2024-10-24-19-05-37/experiment_config.json ADDED
@@ -0,0 +1,110 @@
+ {
+   "experiment_name": "text_generation",
+   "backend": {
+     "name": "pytorch",
+     "version": "2.4.0",
+     "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
+     "task": "text-generation",
+     "model": "google/gemma-2-2b",
+     "processor": "google/gemma-2-2b",
+     "library": "transformers",
+     "device": "cuda",
+     "device_ids": "0",
+     "seed": 42,
+     "inter_op_num_threads": null,
+     "intra_op_num_threads": null,
+     "hub_kwargs": {
+       "revision": "main",
+       "force_download": false,
+       "local_files_only": false,
+       "trust_remote_code": true
+     },
+     "no_weights": true,
+     "device_map": null,
+     "torch_dtype": null,
+     "amp_autocast": false,
+     "amp_dtype": null,
+     "eval_mode": true,
+     "to_bettertransformer": false,
+     "low_cpu_mem_usage": null,
+     "attn_implementation": null,
+     "cache_implementation": null,
+     "torch_compile": false,
+     "torch_compile_config": {},
+     "quantization_scheme": null,
+     "quantization_config": {},
+     "deepspeed_inference": false,
+     "deepspeed_inference_config": {},
+     "peft_type": null,
+     "peft_config": {}
+   },
+   "launcher": {
+     "name": "process",
+     "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
+     "device_isolation": false,
+     "device_isolation_action": "warn",
+     "start_method": "spawn"
+   },
+   "benchmark": {
+     "name": "energy_star",
+     "_target_": "optimum_benchmark.benchmarks.energy_star.benchmark.EnergyStarBenchmark",
+     "dataset_name": "EnergyStarAI/text_generation",
+     "dataset_config": "",
+     "dataset_split": "train",
+     "num_samples": 1000,
+     "input_shapes": {
+       "batch_size": 1
+     },
+     "text_column_name": "text",
+     "truncation": true,
+     "max_length": -1,
+     "dataset_prefix1": "",
+     "dataset_prefix2": "",
+     "t5_task": "",
+     "image_column_name": "image",
+     "resize": false,
+     "question_column_name": "question",
+     "context_column_name": "context",
+     "sentence1_column_name": "sentence1",
+     "sentence2_column_name": "sentence2",
+     "audio_column_name": "audio",
+     "iterations": 10,
+     "warmup_runs": 10,
+     "energy": true,
+     "forward_kwargs": {},
+     "generate_kwargs": {
+       "max_new_tokens": 10,
+       "min_new_tokens": 10
+     },
+     "call_kwargs": {}
+   },
+   "environment": {
+     "cpu": " AMD EPYC 7R32",
+     "cpu_count": 48,
+     "cpu_ram_mb": 200472.73984,
+     "system": "Linux",
+     "machine": "x86_64",
+     "platform": "Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35",
+     "processor": "x86_64",
+     "python_version": "3.9.20",
+     "gpu": [
+       "NVIDIA A10G"
+     ],
+     "gpu_count": 1,
+     "gpu_vram_mb": 24146608128,
+     "optimum_benchmark_version": "0.2.0",
+     "optimum_benchmark_commit": null,
+     "transformers_version": "4.44.0",
+     "transformers_commit": null,
+     "accelerate_version": "0.33.0",
+     "accelerate_commit": null,
+     "diffusers_version": "0.30.0",
+     "diffusers_commit": null,
+     "optimum_version": null,
+     "optimum_commit": null,
+     "timm_version": null,
+     "timm_commit": null,
+     "peft_version": null,
+     "peft_commit": null
+   }
+ }
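
experiment_config.json is the fully resolved version of .hydra/config.yaml: the null `library` is resolved to `transformers` and the empty `hub_kwargs` are filled in with defaults. A small sketch to surface such resolved keys by diffing the two files (paths are the ones added in this commit; PyYAML is assumed to be installed):

```python
# Sketch: diff the resolved experiment config against the raw Hydra config.
import json
import yaml  # PyYAML, assumed installed

base = "text_generation/google/gemma-2-2b/2024-10-24-19-05-37"
with open(f"{base}/.hydra/config.yaml") as f:
    raw = yaml.safe_load(f)
with open(f"{base}/experiment_config.json") as f:
    resolved = json.load(f)

# Expected output includes backend.library: None -> 'transformers'
# and backend.hub_kwargs: {} -> {'revision': 'main', ...}.
for key, value in resolved["backend"].items():
    if raw["backend"].get(key) != value:
        print(f"backend.{key}: {raw['backend'].get(key)!r} -> {value!r}")
```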
text_generation/google/gemma-2-2b/2024-10-24-19-05-37/preprocess_codecarbon.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "timestamp": "2024-10-24T19:05:52",
+   "project_name": "codecarbon",
+   "run_id": "af8cf9cd-3ff9-45b1-87f5-8bd44c5df4f0",
+   "duration": -1729710570.0952306,
+   "emissions": 1.4465120447672696e-05,
+   "emissions_rate": 1.1696924438465332e-05,
+   "cpu_power": 42.5,
+   "gpu_power": 71.15502348245434,
+   "ram_power": 0.36616086959838867,
+   "cpu_energy": 1.4623404627743892e-05,
+   "gpu_energy": 2.4437519550057907e-05,
+   "ram_energy": 1.2567374983479889e-07,
+   "energy_consumed": 3.9186597927636596e-05,
+   "country_name": "United States",
+   "country_iso_code": "USA",
+   "region": "virginia",
+   "cloud_provider": "",
+   "cloud_region": "",
+   "os": "Linux-5.10.192-183.736.amzn2.x86_64-x86_64-with-glibc2.35",
+   "python_version": "3.9.20",
+   "codecarbon_version": "2.5.1",
+   "cpu_count": 48,
+   "cpu_model": "AMD EPYC 7R32",
+   "gpu_count": 1,
+   "gpu_model": "1 x NVIDIA A10G",
+   "longitude": -77.4903,
+   "latitude": 39.0469,
+   "ram_total_size": 186.7047882080078,
+   "tracking_mode": "process",
+   "on_cloud": "N",
+   "pue": 1.0
+ }
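
A quick consistency check on this snapshot: the per-component energies should sum to `energy_consumed` (1.4623e-05 + 2.4438e-05 + 1.257e-07 ≈ 3.9187e-05 kWh, which they do). The negative `duration` looks like a timestamp artifact in this particular dump, so rate-based fields should be read with caution. A sketch of the check:

```python
# Sketch: sanity-check the codecarbon snapshot above.
import json

# Path relative to the run directory added in this commit.
with open("preprocess_codecarbon.json") as f:
    data = json.load(f)

total = data["cpu_energy"] + data["gpu_energy"] + data["ram_energy"]
# Component energies should reproduce energy_consumed (all in kWh).
assert abs(total - data["energy_consumed"]) < 1e-9
print(f"{total:.6e} kWh total, {data['emissions']:.6e} kg CO2eq")
```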