Commit 3780362
Parent(s): ac02f05

Update dataset

This view is limited to 50 files because it contains too many changes. See raw diff.
- hf-dgx-01/perf-report.csv +0 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/.hydra/config.yaml +77 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/.hydra/hydra.yaml +176 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/.hydra/overrides.yaml +1 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/cli.log +73 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/forward_codecarbon.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/generate_codecarbon.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/hydra_config.yaml +85 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/inference_results.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/.hydra/config.yaml +77 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/.hydra/hydra.yaml +176 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/.hydra/overrides.yaml +1 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/cli.log +80 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/forward_codecarbon.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/generate_codecarbon.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/hydra_config.yaml +85 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/inference_results.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/.hydra/config.yaml +77 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/.hydra/hydra.yaml +176 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/.hydra/overrides.yaml +1 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/cli.log +73 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/forward_codecarbon.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/generate_codecarbon.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/hydra_config.yaml +85 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/inference_results.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/.hydra/config.yaml +77 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/.hydra/hydra.yaml +176 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/.hydra/overrides.yaml +1 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/cli.log +73 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/forward_codecarbon.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/generate_codecarbon.csv +2 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/hydra_config.yaml +85 -0
- hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/inference_results.csv +2 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b/.hydra/config.yaml +80 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b/.hydra/hydra.yaml +176 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b/.hydra/overrides.yaml +1 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b/cli.log +26 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b/hydra_config.yaml +88 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b/.hydra/config.yaml +80 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b/.hydra/hydra.yaml +176 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b/.hydra/overrides.yaml +1 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b/cli.log +26 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b/hydra_config.yaml +88 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b/.hydra/config.yaml +80 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b/.hydra/hydra.yaml +176 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b/.hydra/overrides.yaml +1 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b/cli.log +26 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b/hydra_config.yaml +88 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase/.hydra/config.yaml +80 -0
- hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase/.hydra/hydra.yaml +176 -0
hf-dgx-01/perf-report.csv
CHANGED
The diff for this file is too large to render.
See raw diff
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/.hydra/config.yaml
ADDED
@@ -0,0 +1,77 @@
backend:
  name: pytorch
  version: ${pytorch_version:}
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: bfloat16
  eval_mode: ${is_inference:${benchmark.name}}
  disable_grad: ${is_inference:${benchmark.name}}
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: null
  quantization_config: {}
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: ${can_diffuse:${task}}
  can_generate: ${can_generate:${task}}
  forward_kwargs: {}
  generate_kwargs: {}
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
experiment_name: pytorch+cuda+bfloat16
device: cuda
model: bigcode/starcoderbase-1b
task: ${infer_task:${model}}
library: ${infer_library:${model}}
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB

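Note: the `${...}` entries above are OmegaConf resolver interpolations that optimum-benchmark fills in at run time; the resolved values are what the `hydra_config.yaml` files below record (e.g. `version: 2.1.2+cu118`, `task: text-generation`). A minimal sketch of the mechanism, assuming resolvers with these names are registered roughly like this (the real registration code lives inside optimum_benchmark and may differ):

# Minimal sketch of OmegaConf resolvers, assuming the resolver names used
# in the config above; the actual registrations in optimum_benchmark may differ.
from omegaconf import OmegaConf
import torch

OmegaConf.register_new_resolver("pytorch_version", lambda: torch.__version__)
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create(
    {
        "benchmark": {"name": "inference"},
        "backend": {
            "version": "${pytorch_version:}",
            "eval_mode": "${is_inference:${benchmark.name}}",
        },
    }
)
print(cfg.backend.version)    # e.g. 2.1.2+cu118
print(cfg.backend.eval_mode)  # True, because benchmark.name == "inference"
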
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
hydra:
  run:
    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
          - %(message)s'
        log_colors:
          DEBUG: purple
          INFO: green
          WARNING: yellow
          ERROR: red
          CRITICAL: red
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=RUN
    task:
    - model=bigcode/starcoderbase-1b
  job:
    name: cli
    chdir: true
    override_dirname: model=bigcode/starcoderbase-1b
    id: ???
    num: ???
    config_name: pytorch+cuda+bfloat16
    env_set:
      COUNTRY_ISO_CODE: FRA
      OVERRIDE_BENCHMARKS: '0'
      CUDA_VISIBLE_DEVICES: '0'
      CUDA_DEVICE_ORDER: PCI_BUS_ID
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /workspace/opt-bench
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: hydra_plugins.hydra_colorlog.conf
      schema: pkg
      provider: hydra-colorlog
    - path: /workspace/opt-bench/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b
    choices:
      launcher: process
      benchmark: inference
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: colorlog
      hydra/hydra_logging: colorlog
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- model=bigcode/starcoderbase-1b

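Together with the `config_name: pytorch+cuda+bfloat16` recorded in hydra.yaml, this single override is the entire run-specific input. A hedged sketch of re-launching the run from that recorded metadata (the `optimum-benchmark` entry point and its flags are assumptions inferred from the Hydra runtime info above, not confirmed by this commit):

# Hedged sketch: re-launch this run from the metadata in .hydra/hydra.yaml.
# The "optimum-benchmark" CLI name and flags are assumptions.
import subprocess

subprocess.run(
    [
        "optimum-benchmark",
        "--config-dir", "/workspace/opt-bench/configs",  # hydra.runtime.config_sources
        "--config-name", "pytorch+cuda+bfloat16",        # hydra.job.config_name
        "model=bigcode/starcoderbase-1b",                # the override recorded here
    ],
    check=True,
)
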
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/cli.log
ADDED
@@ -0,0 +1,73 @@
[2024-02-14 19:26:44,460][launcher][INFO] - Configuring process launcher
[2024-02-14 19:26:44,460][process][INFO] - Setting multiprocessing start method to spawn.
[2024-02-14 19:26:44,462][process][INFO] - + Launched worker process with PID 1953909.
[2024-02-14 19:26:44,463][isolation][INFO] - + Launched device(s) isolation process 1953910.
[2024-02-14 19:26:50,764][numexpr.utils][INFO] - Note: detected 128 virtual cores but NumExpr set to maximum of 64, check "NUMEXPR_MAX_THREADS" environment variable.
[2024-02-14 19:26:50,764][numexpr.utils][INFO] - Note: NumExpr detected 128 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
[2024-02-14 19:26:50,765][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
[2024-02-14 19:26:50,875][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
[2024-02-14 19:26:51,986][backend][INFO] - Configuring pytorch backend
[2024-02-14 19:26:51,986][pytorch][INFO] - + Inferred class AutoModelForCausalLM for task text-generation and model_type gpt_bigcode
[2024-02-14 19:26:51,986][pytorch][INFO] - + Disabling gradients
[2024-02-14 19:26:51,987][pytorch][INFO] - + Loading model with no weights
[2024-02-14 19:26:51,987][pytorch][INFO] - + Creating no weights model directory
[2024-02-14 19:26:51,987][pytorch][INFO] - + Saving pretrained config
[2024-02-14 19:26:51,988][pytorch][INFO] - + Creating no weights model
[2024-02-14 19:26:51,992][pytorch][INFO] - + Saving no weights model
[2024-02-14 19:26:51,993][pytorch][INFO] - + Loading no weights model
[2024-02-14 19:26:51,993][pytorch][INFO] - + Loading model directly on device: cuda
[2024-02-14 19:26:53,977][pytorch][INFO] - + Randomizing model weights
[2024-02-14 19:26:53,981][pytorch][INFO] - + Tying model weights after randomization
[2024-02-14 19:26:53,982][pytorch][INFO] - + Turning on model's eval mode
[2024-02-14 19:26:54,091][benchmark][INFO] - Configuring inference benchmark
[2024-02-14 19:26:54,091][inference][INFO] - Running inference benchmark
[2024-02-14 19:26:54,091][inference][INFO] - + Updating input shapes with model shapes
[2024-02-14 19:26:54,091][inference][INFO] - + Preparing backend for inference
[2024-02-14 19:26:54,091][inference][INFO] - + Creating input generator
[2024-02-14 19:26:54,091][input-generator][INFO] - Using text-generation task generator
[2024-02-14 19:26:54,092][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 19:26:54,092][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:26:54,092][inference][INFO] - + Tracking forward pass peak memory
[2024-02-14 19:26:54,092][memory][INFO] - Tracking CUDA devices: [0]
[2024-02-14 19:26:54,092][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-02-14 19:26:55,968][inference][INFO] - + Forward pass max memory used: 3930 (MB)
[2024-02-14 19:26:55,968][inference][INFO] - + Forward pass max memory reserved: 2472 (MB)
[2024-02-14 19:26:55,969][inference][INFO] - + Forward pass max memory allocated: 2414 (MB)
[2024-02-14 19:26:55,969][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 19:26:55,969][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:26:55,969][inference][INFO] - + Tracking generation pass peak memory
[2024-02-14 19:26:55,969][memory][INFO] - Tracking CUDA devices: [0]
[2024-02-14 19:26:55,969][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-02-14 19:27:01,723][inference][INFO] - + Generation pass max memory used: 3938 (MB)
[2024-02-14 19:27:01,723][inference][INFO] - + Generation pass max memory reserved: 2478 (MB)
[2024-02-14 19:27:01,723][inference][INFO] - + Generation pass max memory allocated: 2417 (MB)
[2024-02-14 19:27:01,723][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 19:27:01,723][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:27:01,724][inference][INFO] - + Warming up the forward pass
[2024-02-14 19:27:01,830][inference][INFO] - + Tracking forward pass latency and throughput
[2024-02-14 19:27:11,860][inference][INFO] - + Forward pass latency: 1.09e-02 (s)
[2024-02-14 19:27:11,861][inference][INFO] - + Forward pass throughput: 91.70 (samples/s)
[2024-02-14 19:27:11,861][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 19:27:11,861][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:27:11,861][inference][INFO] - + Warming up the generation pass
[2024-02-14 19:27:14,794][inference][INFO] - + Tracking generation latency and throughput
[2024-02-14 19:27:26,489][inference][INFO] - + Generation pass latency: 2.92e+00 (s)
[2024-02-14 19:27:26,490][inference][INFO] - + Generation pass throughput: 87.70 (tokens/s)
[2024-02-14 19:27:26,490][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 19:27:26,490][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:27:26,490][inference][INFO] - + Tracking forward pass energy consumption
[2024-02-14 19:27:42,565][inference][INFO] - + Forward pass energy consumption: 1.14e-06 (kWh/sample)
[2024-02-14 19:27:42,565][inference][INFO] - + Forward pass carbon emissions: 7.72e-08 (kgCO2eq/sample)
[2024-02-14 19:27:42,565][inference][INFO] - + Full details in the CodeCarbon report: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/forward_codecarbon.csv
[2024-02-14 19:27:42,566][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 19:27:42,566][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:27:42,566][inference][INFO] - + Tracking generation pass energy consumption
[2024-02-14 19:27:59,895][inference][INFO] - + Generation pass energy consumption: 9.15e-07 (kWh/token)
[2024-02-14 19:27:59,896][inference][INFO] - + Generation pass carbon emissions: 6.16e-08 (kgCO2eq/token)
[2024-02-14 19:27:59,896][inference][INFO] - + Full details in the CodeCarbon report: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/generate_codecarbon.csv
[2024-02-14 19:27:59,896][inference][INFO] - Saving results
[2024-02-14 19:27:59,899][backend][INFO] - Cleaning pytorch backend
[2024-02-14 19:27:59,899][backend][INFO] - + Deleting pretrained model
[2024-02-14 19:28:00,128][pytorch][INFO] - + Cleaning temporary directory
[2024-02-14 19:28:00,128][pytorch][INFO] - + Emptying CUDA cache
[2024-02-14 19:28:02,208][isolation][INFO] - + Closing device(s) isolation process...

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-02-14T19:27:42,codecarbon,4ac85be7-6246-4fd4-8388-098bcecf1b49,10.569979667663574,7.113921164897584e-05,6.7303073313007325e-06,112.5,615.1367847265042,0.34110403060913086,0.00033028850704431537,0.0007245664129857943,7.796199457894204e-07,0.001055634539975899,France,FRA,île-de-france,,,Linux-5.4.0-166-generic-x86_64-with-glibc2.35,3.10.12,2.3.4,128,AMD EPYC 7742 64-Core Processor,1,1 x NVIDIA A100-SXM4-80GB,2.4075,48.8323,503.5396919250488,process,N,1.0

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-02-14T19:27:59,codecarbon,943bf2df-34c1-40c0-8c00-50a5b50c477d,12.401583433151245,6.31205777395431e-05,5.089719234627134e-06,112.5,208.46505591460803,0.3414473533630371,0.0003875252157449722,0.0005481851607687105,9.356798677255066e-07,0.0009366460563814082,France,FRA,île-de-france,,,Linux-5.4.0-166-generic-x86_64-with-glibc2.35,3.10.12,2.3.4,128,AMD EPYC 7742 64-Core Processor,1,1 x NVIDIA A100-SXM4-80GB,2.4075,48.8323,503.5396919250488,process,N,1.0

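These CodeCarbon totals are consistent with the per-token figure in cli.log: 0.000937 kWh of energy_consumed divided by 9.15e-07 kWh/token gives roughly 1024 tokens, i.e. about four generation passes of 256 new tokens inside the 12.4 s tracking window. A small sketch of that cross-check (the file path is illustrative; the "four passes" reading is an inference, not logged directly):

# Cross-check sketch: CodeCarbon's total energy vs. the per-token energy
# reported in cli.log. Column names come from the CSV header above.
import csv

with open("generate_codecarbon.csv") as f:  # illustrative path
    row = next(csv.DictReader(f))

total_kwh = float(row["energy_consumed"])  # ~0.000937
kwh_per_token = 9.15e-07                   # from cli.log / inference_results.csv
print(total_kwh / kwh_per_token)           # ~1024 tokens ~= 4 passes x 256 new tokens
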
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/hydra_config.yaml
ADDED
@@ -0,0 +1,85 @@
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
backend:
  name: pytorch
  version: 2.1.2+cu118
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: bfloat16
  eval_mode: true
  disable_grad: true
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: null
  quantization_config: {}
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: false
  can_generate: true
  forward_kwargs: {}
  generate_kwargs:
    num_return_sequences: 1
    max_new_tokens: 256
    min_new_tokens: 256
    do_sample: false
    use_cache: true
    pad_token_id: 0
    temperature: 1.0
    num_beams: 1
experiment_name: pytorch+cuda+bfloat16
device: cuda
model: bigcode/starcoderbase-1b
task: text-generation
library: transformers
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-1b/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
0.0109,91.7,3930,3930,2414,2472,1.14e-06,7.72e-08,2.92,87.7,2.91,87.6,3938,3938,2417,2478,9.15e-07,6.16e-08

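Each leaf directory in this commit holds a one-row inference_results.csv like the one above, so the whole tree flattens into a single comparison table. A minimal sketch, assuming the dataset is checked out locally under hf-dgx-01/ (column names match the header above):

# Minimal sketch: gather every one-row inference_results.csv in the
# device/experiment/org/model layout shown in this commit into one table.
from pathlib import Path

import pandas as pd

rows = []
for path in Path("hf-dgx-01").glob("*/*/*/inference_results.csv"):
    df = pd.read_csv(path)
    df["experiment"] = path.parts[1]         # e.g. pytorch+cuda+bfloat16
    df["model"] = "/".join(path.parts[2:4])  # e.g. bigcode/starcoderbase-1b
    rows.append(df)

results = pd.concat(rows, ignore_index=True)
print(results[["model", "forward.latency(s)", "generate.throughput(tokens/s)"]])
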
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/.hydra/config.yaml
ADDED
@@ -0,0 +1,77 @@
backend:
  name: pytorch
  version: ${pytorch_version:}
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: bfloat16
  eval_mode: ${is_inference:${benchmark.name}}
  disable_grad: ${is_inference:${benchmark.name}}
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: null
  quantization_config: {}
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: ${can_diffuse:${task}}
  can_generate: ${can_generate:${task}}
  forward_kwargs: {}
  generate_kwargs: {}
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
experiment_name: pytorch+cuda+bfloat16
device: cuda
model: bigcode/starcoderbase-3b
task: ${infer_task:${model}}
library: ${infer_library:${model}}
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
hydra:
  run:
    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
          - %(message)s'
        log_colors:
          DEBUG: purple
          INFO: green
          WARNING: yellow
          ERROR: red
          CRITICAL: red
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=RUN
    task:
    - model=bigcode/starcoderbase-3b
  job:
    name: cli
    chdir: true
    override_dirname: model=bigcode/starcoderbase-3b
    id: ???
    num: ???
    config_name: pytorch+cuda+bfloat16
    env_set:
      COUNTRY_ISO_CODE: FRA
      OVERRIDE_BENCHMARKS: '0'
      CUDA_VISIBLE_DEVICES: '0'
      CUDA_DEVICE_ORDER: PCI_BUS_ID
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /workspace/opt-bench
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: hydra_plugins.hydra_colorlog.conf
      schema: pkg
      provider: hydra-colorlog
    - path: /workspace/opt-bench/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b
    choices:
      launcher: process
      benchmark: inference
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: colorlog
      hydra/hydra_logging: colorlog
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- model=bigcode/starcoderbase-3b

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/cli.log
ADDED
@@ -0,0 +1,80 @@
[2024-02-14 19:28:08,098][launcher][INFO] - Configuring process launcher
[2024-02-14 19:28:08,098][process][INFO] - Setting multiprocessing start method to spawn.
[2024-02-14 19:28:08,100][process][INFO] - + Launched worker process with PID 1955027.
[2024-02-14 19:28:08,101][isolation][INFO] - + Launched device(s) isolation process 1955028.
[2024-02-14 19:28:14,735][isolation][INFO] - + Closing device(s) isolation process...
[2024-02-14 19:28:15,121][process][ERROR] - Worker process exited with code 1, forwarding...
[2024-02-14 20:26:50,732][launcher][INFO] - Configuring process launcher
[2024-02-14 20:26:50,732][process][INFO] - Setting multiprocessing start method to spawn.
[2024-02-14 20:26:50,735][process][INFO] - + Launched worker process with PID 2018531.
[2024-02-14 20:26:50,735][isolation][INFO] - + Launched device(s) isolation process 2018532.
[2024-02-14 20:26:56,346][isolation][ERROR] - Found non-permitted process(es) running on system device(s): {1988336, 2017825}
[2024-02-14 20:26:56,346][isolation][ERROR] - Terminating benchmark process 2018318
[2024-02-14 20:26:57,030][numexpr.utils][INFO] - Note: detected 128 virtual cores but NumExpr set to maximum of 64, check "NUMEXPR_MAX_THREADS" environment variable.
[2024-02-14 20:26:57,030][numexpr.utils][INFO] - Note: NumExpr detected 128 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
[2024-02-14 20:26:57,030][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
[2024-02-14 20:26:57,144][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
[2024-02-14 20:26:58,313][backend][INFO] - Configuring pytorch backend
[2024-02-14 20:26:58,313][pytorch][INFO] - + Inferred class AutoModelForCausalLM for task text-generation and model_type gpt_bigcode
[2024-02-14 20:26:58,313][pytorch][INFO] - + Disabling gradients
[2024-02-14 20:26:58,313][pytorch][INFO] - + Loading model with no weights
[2024-02-14 20:26:58,314][pytorch][INFO] - + Creating no weights model directory
[2024-02-14 20:26:58,314][pytorch][INFO] - + Saving pretrained config
[2024-02-14 20:26:58,315][pytorch][INFO] - + Creating no weights model
[2024-02-14 20:26:58,319][pytorch][INFO] - + Saving no weights model
[2024-02-14 20:26:58,320][pytorch][INFO] - + Loading no weights model
[2024-02-14 20:26:58,320][pytorch][INFO] - + Loading model directly on device: cuda
[2024-02-14 20:27:00,044][pytorch][INFO] - + Randomizing model weights
[2024-02-14 20:27:00,050][pytorch][INFO] - + Tying model weights after randomization
[2024-02-14 20:27:00,052][pytorch][INFO] - + Turning on model's eval mode
[2024-02-14 20:27:00,163][benchmark][INFO] - Configuring inference benchmark
[2024-02-14 20:27:00,163][inference][INFO] - Running inference benchmark
[2024-02-14 20:27:00,163][inference][INFO] - + Updating input shapes with model shapes
[2024-02-14 20:27:00,163][inference][INFO] - + Preparing backend for inference
[2024-02-14 20:27:00,163][inference][INFO] - + Creating input generator
[2024-02-14 20:27:00,163][input-generator][INFO] - Using text-generation task generator
[2024-02-14 20:27:00,164][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 20:27:00,164][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 20:27:00,174][inference][INFO] - + Tracking forward pass peak memory
[2024-02-14 20:27:00,174][memory][INFO] - Tracking CUDA devices: [0]
[2024-02-14 20:27:00,174][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-02-14 20:27:03,328][inference][INFO] - + Forward pass max memory used: 20568 (MB)
[2024-02-14 20:27:03,328][inference][INFO] - + Forward pass max memory reserved: 6404 (MB)
[2024-02-14 20:27:03,328][inference][INFO] - + Forward pass max memory allocated: 6278 (MB)
[2024-02-14 20:27:03,328][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 20:27:03,328][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 20:27:03,355][inference][INFO] - + Tracking generation pass peak memory
[2024-02-14 20:27:03,355][memory][INFO] - Tracking CUDA devices: [0]
[2024-02-14 20:27:03,355][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-02-14 20:27:13,280][inference][INFO] - + Generation pass max memory used: 52928 (MB)
[2024-02-14 20:27:13,280][inference][INFO] - + Generation pass max memory reserved: 6425 (MB)
[2024-02-14 20:27:13,280][inference][INFO] - + Generation pass max memory allocated: 6282 (MB)
[2024-02-14 20:27:13,280][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 20:27:13,281][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 20:27:13,292][inference][INFO] - + Warming up the forward pass
[2024-02-14 20:27:13,704][inference][INFO] - + Tracking forward pass latency and throughput
[2024-02-14 20:27:23,774][inference][INFO] - + Forward pass latency: 4.71e-02 (s)
[2024-02-14 20:27:23,774][inference][INFO] - + Forward pass throughput: 21.20 (samples/s)
[2024-02-14 20:27:23,775][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 20:27:23,775][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 20:27:23,778][inference][INFO] - + Warming up the generation pass
[2024-02-14 20:27:29,395][inference][INFO] - + Tracking generation latency and throughput
[2024-02-14 20:27:41,732][inference][INFO] - + Generation pass latency: 6.17e+00 (s)
[2024-02-14 20:27:41,733][inference][INFO] - + Generation pass throughput: 41.50 (tokens/s)
[2024-02-14 20:27:41,733][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 20:27:41,733][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 20:27:41,734][inference][INFO] - + Tracking forward pass energy consumption
[2024-02-14 20:27:57,881][inference][INFO] - + Forward pass energy consumption: 4.71e-06 (kWh/sample)
[2024-02-14 20:27:57,881][inference][INFO] - + Forward pass carbon emissions: 3.18e-07 (kgCO2eq/sample)
[2024-02-14 20:27:57,881][inference][INFO] - + Full details in the CodeCarbon report: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/forward_codecarbon.csv
[2024-02-14 20:27:57,881][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 20:27:57,882][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 20:27:57,885][inference][INFO] - + Tracking generation pass energy consumption
[2024-02-14 20:28:17,303][inference][INFO] - + Generation pass energy consumption: 2.73e-06 (kWh/token)
[2024-02-14 20:28:17,303][inference][INFO] - + Generation pass carbon emissions: 1.84e-07 (kgCO2eq/token)
[2024-02-14 20:28:17,303][inference][INFO] - + Full details in the CodeCarbon report: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/generate_codecarbon.csv
[2024-02-14 20:28:17,303][inference][INFO] - Saving results
[2024-02-14 20:28:17,305][backend][INFO] - Cleaning pytorch backend
[2024-02-14 20:28:17,305][backend][INFO] - + Deleting pretrained model
[2024-02-14 20:28:17,540][pytorch][INFO] - + Cleaning temporary directory
[2024-02-14 20:28:17,541][pytorch][INFO] - + Emptying CUDA cache

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-02-14T20:27:57,codecarbon,034cbdf7-bfd1-4f08-92b6-5837690b2e0c,10.64318060874939,7.335821861880229e-05,6.892509045510046e-06,112.5,769.2913669213469,0.34687042236328125,0.00033257462829351436,0.0007552114375037888,7.763116890048612e-07,0.0010885623774863078,France,FRA,île-de-france,,,Linux-5.4.0-166-generic-x86_64-with-glibc2.35,3.10.12,2.3.4,128,AMD EPYC 7742 64-Core Processor,1,1 x NVIDIA A100-SXM4-80GB,2.4075,48.8323,503.5396919250488,process,N,1.0

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-02-14T20:28:17,codecarbon,0fc3d538-cc37-4a10-9cbb-ce4841c1e09b,13.590092897415161,9.40505366891559e-05,6.920521986059737e-06,112.5,264.50751714613153,0.34786319732666016,0.00042466054111719136,0.0009699432759529003,1.0118037810126645e-06,0.0013956156208511038,France,FRA,île-de-france,,,Linux-5.4.0-166-generic-x86_64-with-glibc2.35,3.10.12,2.3.4,128,AMD EPYC 7742 64-Core Processor,1,1 x NVIDIA A100-SXM4-80GB,2.4075,48.8323,503.5396919250488,process,N,1.0

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/hydra_config.yaml
ADDED
@@ -0,0 +1,85 @@
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
backend:
  name: pytorch
  version: 2.1.2+cu118
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: bfloat16
  eval_mode: true
  disable_grad: true
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: null
  quantization_config: {}
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: false
  can_generate: true
  forward_kwargs: {}
  generate_kwargs:
    num_return_sequences: 1
    max_new_tokens: 256
    min_new_tokens: 256
    do_sample: false
    use_cache: true
    pad_token_id: 0
    temperature: 1.0
    num_beams: 1
experiment_name: pytorch+cuda+bfloat16
device: cuda
model: bigcode/starcoderbase-3b
task: text-generation
library: transformers
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-3b/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
0.0471,21.2,20568,20568,6278,6404,4.71e-06,3.18e-07,6.17,41.5,6.12,41.7,52928,52928,6282,6425,2.73e-06,1.84e-07

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/.hydra/config.yaml
ADDED
@@ -0,0 +1,77 @@
backend:
  name: pytorch
  version: ${pytorch_version:}
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: bfloat16
  eval_mode: ${is_inference:${benchmark.name}}
  disable_grad: ${is_inference:${benchmark.name}}
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: null
  quantization_config: {}
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: ${can_diffuse:${task}}
  can_generate: ${can_generate:${task}}
  forward_kwargs: {}
  generate_kwargs: {}
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
experiment_name: pytorch+cuda+bfloat16
device: cuda
model: bigcode/starcoderbase-7b
task: ${infer_task:${model}}
library: ${infer_library:${model}}
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB

hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
|
4 |
+
sweep:
|
5 |
+
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
|
6 |
+
subdir: ${hydra.job.num}
|
7 |
+
launcher:
|
8 |
+
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
|
9 |
+
sweeper:
|
10 |
+
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
          - %(message)s'
        log_colors:
          DEBUG: purple
          INFO: green
          WARNING: yellow
          ERROR: red
          CRITICAL: red
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=RUN
    task:
    - model=bigcode/starcoderbase-7b
  job:
    name: cli
    chdir: true
    override_dirname: model=bigcode/starcoderbase-7b
    id: ???
    num: ???
    config_name: pytorch+cuda+bfloat16
    env_set:
      COUNTRY_ISO_CODE: FRA
      OVERRIDE_BENCHMARKS: '0'
      CUDA_VISIBLE_DEVICES: '0'
      CUDA_DEVICE_ORDER: PCI_BUS_ID
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /workspace/opt-bench
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: hydra_plugins.hydra_colorlog.conf
      schema: pkg
      provider: hydra-colorlog
    - path: /workspace/opt-bench/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b
    choices:
      launcher: process
      benchmark: inference
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: colorlog
      hydra/hydra_logging: colorlog
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
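The `${...}` entries above are OmegaConf interpolations that Hydra resolves at compose time. A minimal sketch of how such references resolve, using only the omegaconf package; the config snippet here is a toy stand-in for illustration, not the file above:

from omegaconf import OmegaConf

# Toy config mirroring the interpolation style used in hydra.yaml.
cfg = OmegaConf.create(
    {
        "job": {"name": "cli"},
        "help": {"app_name": "${job.name}"},  # resolves to "cli"
    }
)

OmegaConf.resolve(cfg)  # resolve interpolations in place
assert cfg.help.app_name == "cli"
print(OmegaConf.to_yaml(cfg))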
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- model=bigcode/starcoderbase-7b
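The single override recorded here is what was passed on the command line. As a rough sketch (assuming the `pytorch+cuda+bfloat16` config lives in a local `configs/` directory, as the `config_sources` entry above suggests), the same composition can be reproduced with Hydra's compose API; note that custom resolvers such as `${pytorch_version:}` only resolve once optimum-benchmark registers them:

from hydra import compose, initialize

# Compose the experiment config with the recorded override.
with initialize(version_base="1.3", config_path="configs"):
    cfg = compose(
        config_name="pytorch+cuda+bfloat16",
        overrides=["model=bigcode/starcoderbase-7b"],
    )
print(cfg.model)  # bigcode/starcoderbase-7b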
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/cli.log
ADDED
@@ -0,0 +1,73 @@
[2024-02-14 19:28:19,825][launcher][INFO] - Configuring process launcher
[2024-02-14 19:28:19,825][process][INFO] - Setting multiprocessing start method to spawn.
[2024-02-14 19:28:19,828][process][INFO] - + Launched worker process with PID 1955507.
[2024-02-14 19:28:19,828][isolation][INFO] - + Launched device(s) isolation process 1955508.
[2024-02-14 19:28:26,394][numexpr.utils][INFO] - Note: detected 128 virtual cores but NumExpr set to maximum of 64, check "NUMEXPR_MAX_THREADS" environment variable.
[2024-02-14 19:28:26,394][numexpr.utils][INFO] - Note: NumExpr detected 128 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
[2024-02-14 19:28:26,394][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
[2024-02-14 19:28:26,504][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
[2024-02-14 19:28:29,160][backend][INFO] - Configuring pytorch backend
[2024-02-14 19:28:29,160][pytorch][INFO] - + Inferred class AutoModelForCausalLM for task text-generation and model_type gpt_bigcode
[2024-02-14 19:28:29,160][pytorch][INFO] - + Disabling gradients
[2024-02-14 19:28:29,161][pytorch][INFO] - + Loading model with no weights
[2024-02-14 19:28:29,161][pytorch][INFO] - + Creating no weights model directory
[2024-02-14 19:28:29,161][pytorch][INFO] - + Saving pretrained config
[2024-02-14 19:28:29,162][pytorch][INFO] - + Creating no weights model
[2024-02-14 19:28:29,166][pytorch][INFO] - + Saving no weights model
[2024-02-14 19:28:29,166][pytorch][INFO] - + Loading no weights model
[2024-02-14 19:28:29,167][pytorch][INFO] - + Loading model directly on device: cuda
[2024-02-14 19:28:31,185][pytorch][INFO] - + Randomizing model weights
[2024-02-14 19:28:31,192][pytorch][INFO] - + Tying model weights after randomization
[2024-02-14 19:28:31,193][pytorch][INFO] - + Turning on model's eval mode
[2024-02-14 19:28:31,303][benchmark][INFO] - Configuring inference benchmark
[2024-02-14 19:28:31,303][inference][INFO] - Running inference benchmark
[2024-02-14 19:28:31,303][inference][INFO] - + Updating input shapes with model shapes
[2024-02-14 19:28:31,303][inference][INFO] - + Preparing backend for inference
[2024-02-14 19:28:31,303][inference][INFO] - + Creating input generator
[2024-02-14 19:28:31,303][input-generator][INFO] - Using text-generation task generator
[2024-02-14 19:28:31,304][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 19:28:31,304][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:28:31,304][inference][INFO] - + Tracking forward pass peak memory
[2024-02-14 19:28:31,304][memory][INFO] - Tracking CUDA devices: [0]
[2024-02-14 19:28:31,304][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-02-14 19:28:33,213][inference][INFO] - + Forward pass max memory used: 16374 (MB)
[2024-02-14 19:28:33,214][inference][INFO] - + Forward pass max memory reserved: 14917 (MB)
[2024-02-14 19:28:33,214][inference][INFO] - + Forward pass max memory allocated: 14853 (MB)
[2024-02-14 19:28:33,214][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 19:28:33,214][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:28:33,219][inference][INFO] - + Tracking generation pass peak memory
[2024-02-14 19:28:33,219][memory][INFO] - Tracking CUDA devices: [0]
[2024-02-14 19:28:33,219][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-02-14 19:28:41,827][inference][INFO] - + Generation pass max memory used: 16399 (MB)
[2024-02-14 19:28:41,827][inference][INFO] - + Generation pass max memory reserved: 14940 (MB)
[2024-02-14 19:28:41,827][inference][INFO] - + Generation pass max memory allocated: 14855 (MB)
[2024-02-14 19:28:41,827][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 19:28:41,827][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:28:41,827][inference][INFO] - + Warming up the forward pass
[2024-02-14 19:28:42,106][inference][INFO] - + Tracking forward pass latency and throughput
[2024-02-14 19:28:52,132][inference][INFO] - + Forward pass latency: 3.15e-02 (s)
[2024-02-14 19:28:52,133][inference][INFO] - + Forward pass throughput: 31.70 (samples/s)
[2024-02-14 19:28:52,133][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 19:28:52,133][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:28:52,133][inference][INFO] - + Warming up the generation pass
[2024-02-14 19:28:57,019][inference][INFO] - + Tracking generation latency and throughput
[2024-02-14 19:29:11,714][inference][INFO] - + Generation pass latency: 4.90e+00 (s)
[2024-02-14 19:29:11,714][inference][INFO] - + Generation pass throughput: 52.20 (tokens/s)
[2024-02-14 19:29:11,714][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 19:29:11,715][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:29:11,715][inference][INFO] - + Tracking forward pass energy consumption
[2024-02-14 19:29:28,481][inference][INFO] - + Forward pass energy consumption: 3.27e-06 (kWh/sample)
[2024-02-14 19:29:28,481][inference][INFO] - + Forward pass carbon emissions: 2.21e-07 (kgCO2eq/sample)
[2024-02-14 19:29:28,481][inference][INFO] - + Full details in the CodeCarbon report: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/forward_codecarbon.csv
[2024-02-14 19:29:28,481][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 19:29:28,481][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 19:29:28,482][inference][INFO] - + Tracking generation pass energy consumption
[2024-02-14 19:29:48,958][inference][INFO] - + Generation pass energy consumption: 2.01e-06 (kWh/token)
[2024-02-14 19:29:48,958][inference][INFO] - + Generation pass carbon emissions: 1.35e-07 (kgCO2eq/token)
[2024-02-14 19:29:48,958][inference][INFO] - + Full details in the CodeCarbon report: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/generate_codecarbon.csv
[2024-02-14 19:29:48,958][inference][INFO] - Saving results
[2024-02-14 19:29:48,960][backend][INFO] - Cleaning pytorch backend
[2024-02-14 19:29:48,960][backend][INFO] - + Deleting pretrained model
[2024-02-14 19:29:49,186][pytorch][INFO] - + Cleaning temporary directory
[2024-02-14 19:29:49,187][pytorch][INFO] - + Emptying CUDA cache
[2024-02-14 19:29:52,609][isolation][INFO] - + Closing device(s) isolation process...
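The latency figures above come from repeating the tracked pass for the configured `duration: 10` seconds and averaging; throughput is then batch_size / latency (1 / 3.15e-02 ≈ 31.7 samples/s here). A minimal sketch of that measurement loop under those assumptions, with a placeholder `forward()` standing in for the model call (the real tracker also synchronizes the CUDA device around each call):

import time

def track_latency(forward, duration_s=10.0, batch_size=1):
    """Repeat `forward` for ~duration_s seconds and report mean latency."""
    latencies = []
    while sum(latencies) < duration_s:
        start = time.perf_counter()
        forward()  # placeholder for model(**inputs) on an already-synced device
        latencies.append(time.perf_counter() - start)
    latency = sum(latencies) / len(latencies)
    return latency, batch_size / latency  # (s), (samples/s)

latency, throughput = track_latency(lambda: time.sleep(0.0315))
print(f"latency: {latency:.2e} s, throughput: {throughput:.1f} samples/s")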
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-02-14T19:29:28,codecarbon,ba80498a-3787-4557-b10e-7a0cb676452b,10.584910154342651,7.541600888682554e-05,7.124860559717151e-06,112.5,858.5099045401266,0.3457632064819336,0.0003307555988430977,0.0007876086856413167,7.336363766264261e-07,0.0011190979208610407,France,FRA,île-de-france,,,Linux-5.4.0-166-generic-x86_64-with-glibc2.35,3.10.12,2.3.4,128,AMD EPYC 7742 64-Core Processor,1,1 x NVIDIA A100-SXM4-80GB,2.4075,48.8323,503.5396919250488,process,N,1.0
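In a CodeCarbon row, `energy_consumed` is the sum of the CPU, GPU and RAM energies (all in kWh), and `emissions_rate` is `emissions / duration`. A quick sanity check on the values above:

cpu_energy = 0.0003307555988430977   # kWh
gpu_energy = 0.0007876086856413167   # kWh
ram_energy = 7.336363766264261e-07   # kWh
energy_consumed = 0.0011190979208610407  # kWh, as reported

assert abs((cpu_energy + gpu_energy + ram_energy) - energy_consumed) < 1e-12

emissions, duration = 7.541600888682554e-05, 10.584910154342651  # kgCO2eq, s
print(emissions / duration)  # ~7.12e-06 kgCO2eq/s, matching emissions_rate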
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-02-14T19:29:48,codecarbon,ed6e1fb2-8e33-450c-a013-402d161331e3,15.274165868759155,0.00010402625471507791,6.8106013519106045e-06,112.5,514.0111410067171,0.34665727615356445,0.00047728438675403604,0.0010652127966128688,1.1480861846295432e-06,0.0015436452695515345,France,FRA,île-de-france,,,Linux-5.4.0-166-generic-x86_64-with-glibc2.35,3.10.12,2.3.4,128,AMD EPYC 7742 64-Core Processor,1,1 x NVIDIA A100-SXM4-80GB,2.4075,48.8323,503.5396919250488,process,N,1.0
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/hydra_config.yaml
ADDED
@@ -0,0 +1,85 @@
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
backend:
  name: pytorch
  version: 2.1.2+cu118
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: bfloat16
  eval_mode: true
  disable_grad: true
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: null
  quantization_config: {}
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: false
  can_generate: true
  forward_kwargs: {}
  generate_kwargs:
    num_return_sequences: 1
    max_new_tokens: 256
    min_new_tokens: 256
    do_sample: false
    use_cache: true
    pad_token_id: 0
    temperature: 1.0
    num_beams: 1
experiment_name: pytorch+cuda+bfloat16
device: cuda
model: bigcode/starcoderbase-7b
task: text-generation
library: transformers
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB
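The resolved `generate_kwargs` above map directly onto the transformers generation API: greedy decoding (no sampling, one beam) forced to exactly 256 new tokens so every run generates the same amount of work. A hedged, illustrative sketch of the equivalent call; note the benchmark itself runs on a randomized no-weights model, whereas this loads real weights:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "bigcode/starcoderbase-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="cuda"
)

inputs = tokenizer("def fib(n):", return_tensors="pt").to("cuda")
outputs = model.generate(
    **inputs,
    num_return_sequences=1,
    max_new_tokens=256,
    min_new_tokens=256,  # forces a fixed-length generation pass
    do_sample=False,
    use_cache=True,
    pad_token_id=0,
    temperature=1.0,
    num_beams=1,
)
print(tokenizer.decode(outputs[0]))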
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase-7b/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
0.0315,31.7,16374,16374,14853,14917,3.27e-06,2.21e-07,4.9,52.2,4.87,52.4,16399,16399,14855,14940,2.01e-06,1.35e-07
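The decode columns are consistent with being derived from the other two: decode latency is the generation latency minus one forward (prefill) pass, and decode throughput divides the remaining 255 tokens by it. Checking against the row above:

new_tokens = 256
forward_latency, generate_latency = 0.0315, 4.9  # s, from the row above

decode_latency = generate_latency - forward_latency    # ~4.87 s
decode_throughput = (new_tokens - 1) / decode_latency  # ~52.4 tokens/s

print(round(decode_latency, 2), round(decode_throughput, 1))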
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/.hydra/config.yaml
ADDED
@@ -0,0 +1,77 @@
backend:
  name: pytorch
  version: ${pytorch_version:}
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: bfloat16
  eval_mode: ${is_inference:${benchmark.name}}
  disable_grad: ${is_inference:${benchmark.name}}
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: null
  quantization_config: {}
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: ${can_diffuse:${task}}
  can_generate: ${can_generate:${task}}
  forward_kwargs: {}
  generate_kwargs: {}
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
experiment_name: pytorch+cuda+bfloat16
device: cuda
model: bigcode/starcoderbase
task: ${infer_task:${model}}
library: ${infer_library:${model}}
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB
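Unlike the fully resolved `hydra_config.yaml` files, this pre-resolution config still contains custom resolvers such as `${infer_task:${model}}` and `${pytorch_version:}`, which the benchmark package presumably registers before composing. A minimal sketch of how such resolvers are registered with OmegaConf; the resolver bodies here are toy stand-ins, not the real implementations:

import torch
from omegaconf import OmegaConf

# Toy stand-ins for the resolvers referenced by the config.
OmegaConf.register_new_resolver("pytorch_version", lambda: torch.__version__)
OmegaConf.register_new_resolver(
    "infer_task", lambda model: "text-generation"  # real impl queries the Hub
)

cfg = OmegaConf.create(
    {"model": "bigcode/starcoderbase", "task": "${infer_task:${model}}"}
)
print(cfg.task)  # text-generation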
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
hydra:
  run:
    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
          - %(message)s'
        log_colors:
          DEBUG: purple
          INFO: green
          WARNING: yellow
          ERROR: red
          CRITICAL: red
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=RUN
    task:
    - model=bigcode/starcoderbase
  job:
    name: cli
    chdir: true
    override_dirname: model=bigcode/starcoderbase
    id: ???
    num: ???
    config_name: pytorch+cuda+bfloat16
    env_set:
      COUNTRY_ISO_CODE: FRA
      OVERRIDE_BENCHMARKS: '0'
      CUDA_VISIBLE_DEVICES: '0'
      CUDA_DEVICE_ORDER: PCI_BUS_ID
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /workspace/opt-bench
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: hydra_plugins.hydra_colorlog.conf
      schema: pkg
      provider: hydra-colorlog
    - path: /workspace/opt-bench/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase
    choices:
      launcher: process
      benchmark: inference
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: colorlog
      hydra/hydra_logging: colorlog
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- model=bigcode/starcoderbase
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/cli.log
ADDED
@@ -0,0 +1,73 @@
[2024-02-14 17:59:46,545][launcher][INFO] - Configuring process launcher
[2024-02-14 17:59:46,546][process][INFO] - Setting multiprocessing start method to spawn.
[2024-02-14 17:59:46,548][process][INFO] - + Launched worker process with PID 1875308.
[2024-02-14 17:59:46,549][isolation][INFO] - + Launched device(s) isolation process 1875309.
[2024-02-14 17:59:52,797][numexpr.utils][INFO] - Note: detected 128 virtual cores but NumExpr set to maximum of 64, check "NUMEXPR_MAX_THREADS" environment variable.
[2024-02-14 17:59:52,797][numexpr.utils][INFO] - Note: NumExpr detected 128 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
[2024-02-14 17:59:52,797][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
[2024-02-14 17:59:52,905][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
[2024-02-14 17:59:54,081][backend][INFO] - Configuring pytorch backend
[2024-02-14 17:59:54,081][pytorch][INFO] - + Inferred class AutoModelForCausalLM for task text-generation and model_type gpt_bigcode
[2024-02-14 17:59:54,081][pytorch][INFO] - + Disabling gradients
[2024-02-14 17:59:54,081][pytorch][INFO] - + Loading model with no weights
[2024-02-14 17:59:54,081][pytorch][INFO] - + Creating no weights model directory
[2024-02-14 17:59:54,082][pytorch][INFO] - + Saving pretrained config
[2024-02-14 17:59:54,082][pytorch][INFO] - + Creating no weights model
[2024-02-14 17:59:54,085][pytorch][INFO] - + Saving no weights model
[2024-02-14 17:59:54,085][pytorch][INFO] - + Loading no weights model
[2024-02-14 17:59:54,085][pytorch][INFO] - + Loading model directly on device: cuda
[2024-02-14 17:59:56,058][pytorch][INFO] - + Randomizing model weights
[2024-02-14 17:59:56,064][pytorch][INFO] - + Tying model weights after randomization
[2024-02-14 17:59:56,066][pytorch][INFO] - + Turning on model's eval mode
[2024-02-14 17:59:56,175][benchmark][INFO] - Configuring inference benchmark
[2024-02-14 17:59:56,175][inference][INFO] - Running inference benchmark
[2024-02-14 17:59:56,175][inference][INFO] - + Updating input shapes with model shapes
[2024-02-14 17:59:56,175][inference][INFO] - + Preparing backend for inference
[2024-02-14 17:59:56,176][inference][INFO] - + Creating input generator
[2024-02-14 17:59:56,176][input-generator][INFO] - Using text-generation task generator
[2024-02-14 17:59:56,176][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 17:59:56,176][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 17:59:56,213][inference][INFO] - + Tracking forward pass peak memory
[2024-02-14 17:59:56,213][memory][INFO] - Tracking CUDA devices: [0]
[2024-02-14 17:59:56,213][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-02-14 17:59:59,297][inference][INFO] - + Forward pass max memory used: 32868 (MB)
[2024-02-14 17:59:59,297][inference][INFO] - + Forward pass max memory reserved: 31411 (MB)
[2024-02-14 17:59:59,297][inference][INFO] - + Forward pass max memory allocated: 31327 (MB)
[2024-02-14 17:59:59,297][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 17:59:59,297][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 17:59:59,297][inference][INFO] - + Tracking generation pass peak memory
[2024-02-14 17:59:59,297][memory][INFO] - Tracking CUDA devices: [0]
[2024-02-14 17:59:59,298][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-02-14 18:00:08,361][inference][INFO] - + Generation pass max memory used: 32893 (MB)
[2024-02-14 18:00:08,361][inference][INFO] - + Generation pass max memory reserved: 31434 (MB)
[2024-02-14 18:00:08,361][inference][INFO] - + Generation pass max memory allocated: 31327 (MB)
[2024-02-14 18:00:08,362][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 18:00:08,362][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 18:00:08,362][inference][INFO] - + Warming up the forward pass
[2024-02-14 18:00:09,384][inference][INFO] - + Tracking forward pass latency and throughput
[2024-02-14 18:00:19,460][inference][INFO] - + Forward pass latency: 5.34e-02 (s)
[2024-02-14 18:00:19,461][inference][INFO] - + Forward pass throughput: 18.70 (samples/s)
[2024-02-14 18:00:19,461][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 18:00:19,461][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 18:00:19,461][inference][INFO] - + Warming up the generation pass
[2024-02-14 18:00:25,464][inference][INFO] - + Tracking generation latency and throughput
[2024-02-14 18:00:37,467][inference][INFO] - + Generation pass latency: 6.00e+00 (s)
[2024-02-14 18:00:37,467][inference][INFO] - + Generation pass throughput: 42.70 (tokens/s)
[2024-02-14 18:00:37,467][inference][INFO] - + Preparing input for the forward pass
[2024-02-14 18:00:37,467][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 18:00:37,467][inference][INFO] - + Tracking forward pass energy consumption
[2024-02-14 18:00:53,074][inference][INFO] - + Forward pass energy consumption: 6.03e-06 (kWh/sample)
[2024-02-14 18:00:53,074][inference][INFO] - + Forward pass carbon emissions: 4.06e-07 (kgCO2eq/sample)
[2024-02-14 18:00:53,074][inference][INFO] - + Full details in the CodeCarbon report: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/forward_codecarbon.csv
[2024-02-14 18:00:53,075][inference][INFO] - + Preparing input for the generation pass
[2024-02-14 18:00:53,075][pytorch][INFO] - + Moving inputs tensors to device cuda
[2024-02-14 18:00:53,075][inference][INFO] - + Tracking generation pass energy consumption
[2024-02-14 18:01:11,177][inference][INFO] - + Generation pass energy consumption: 2.61e-06 (kWh/token)
[2024-02-14 18:01:11,177][inference][INFO] - + Generation pass carbon emissions: 1.76e-07 (kgCO2eq/token)
[2024-02-14 18:01:11,177][inference][INFO] - + Full details in the CodeCarbon report: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/generate_codecarbon.csv
[2024-02-14 18:01:11,177][inference][INFO] - Saving results
[2024-02-14 18:01:11,179][backend][INFO] - Cleaning pytorch backend
[2024-02-14 18:01:11,179][backend][INFO] - + Deleting pretrained model
[2024-02-14 18:01:11,410][pytorch][INFO] - + Cleaning temporary directory
[2024-02-14 18:01:11,411][pytorch][INFO] - + Emptying CUDA cache
[2024-02-14 18:01:13,504][isolation][INFO] - + Closing device(s) isolation process...
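The "max memory allocated/reserved" figures in these logs correspond to PyTorch's CUDA memory statistics, reset before the tracked pass and read afterwards; the device-level "max memory used" figure would come from outside PyTorch (e.g. NVML). A minimal sketch of the PyTorch side of that pattern, with `run_forward()` as a placeholder:

import torch

def run_forward():
    # Placeholder for model(**inputs); allocate something measurable.
    _ = torch.empty(1024, 1024, device="cuda")

torch.cuda.reset_peak_memory_stats(0)
run_forward()
torch.cuda.synchronize(0)

allocated_mb = torch.cuda.max_memory_allocated(0) / 1e6
reserved_mb = torch.cuda.max_memory_reserved(0) / 1e6
print(f"max allocated: {allocated_mb:.0f} MB, max reserved: {reserved_mb:.0f} MB")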
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-02-14T18:00:53,codecarbon,b9b0c3b0-e693-492a-b26c-32fb4af114d8,10.493306159973145,7.430436669633096e-05,7.081120627144756e-06,112.5,866.1832244563208,0.34720659255981445,0.0003278931528329849,0.000773969785841544,7.393271858503188e-07,0.0011026022658603791,France,FRA,île-de-france,,,Linux-5.4.0-166-generic-x86_64-with-glibc2.35,3.10.12,2.3.4,128,AMD EPYC 7742 64-Core Processor,1,1 x NVIDIA A100-SXM4-80GB,2.4075,48.8323,503.5396919250488,process,N,1.0
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-02-14T18:01:11,codecarbon,0804188d-f1cb-48d7-bd90-aa7ff798d785,12.91486644744873,9.005441423796991e-05,6.972926480069e-06,112.5,0.0,0.34815502166748047,0.00040356197208166124,0.0009318851899529079,8.700102160601849e-07,0.0013363171722506292,France,FRA,île-de-france,,,Linux-5.4.0-166-generic-x86_64-with-glibc2.35,3.10.12,2.3.4,128,AMD EPYC 7742 64-Core Processor,1,1 x NVIDIA A100-SXM4-80GB,2.4075,48.8323,503.5396919250488,process,N,1.0
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/hydra_config.yaml
ADDED
@@ -0,0 +1,85 @@
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
backend:
  name: pytorch
  version: 2.1.2+cu118
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: bfloat16
  eval_mode: true
  disable_grad: true
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: null
  quantization_config: {}
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: false
  can_generate: true
  forward_kwargs: {}
  generate_kwargs:
    num_return_sequences: 1
    max_new_tokens: 256
    min_new_tokens: 256
    do_sample: false
    use_cache: true
    pad_token_id: 0
    temperature: 1.0
    num_beams: 1
experiment_name: pytorch+cuda+bfloat16
device: cuda
model: bigcode/starcoderbase
task: text-generation
library: transformers
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB
hf-dgx-01/pytorch+cuda+bfloat16/bigcode/starcoderbase/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
0.0534,18.7,32868,32868,31327,31411,6.03e-06,4.06e-07,6.0,42.7,5.95,42.9,32893,32893,31327,31434,2.61e-06,1.76e-07
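Each run writes a one-row `inference_results.csv`; a summary like the `perf-report.csv` at the top of this commit can plausibly be rebuilt by globbing these rows into one frame (the report's exact schema is not shown here). A rough pandas sketch, with paths matching the dataset layout used in this commit:

from pathlib import Path
import pandas as pd

rows = []
for csv_path in Path("hf-dgx-01").glob("*/bigcode/*/inference_results.csv"):
    row = pd.read_csv(csv_path)
    row["experiment"] = csv_path.parts[1]         # e.g. pytorch+cuda+bfloat16
    row["model"] = "/".join(csv_path.parts[2:4])  # e.g. bigcode/starcoderbase-7b
    rows.append(row)

report = pd.concat(rows, ignore_index=True)
print(report[["model", "experiment", "generate.throughput(tokens/s)"]])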
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b/.hydra/config.yaml
ADDED
@@ -0,0 +1,80 @@
backend:
  name: pytorch
  version: ${pytorch_version:}
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: float16
  eval_mode: ${is_inference:${benchmark.name}}
  disable_grad: ${is_inference:${benchmark.name}}
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: awq
  quantization_config:
    bits: 4
    version: gemm
    do_fuse: false
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: ${can_diffuse:${task}}
  can_generate: ${can_generate:${task}}
  forward_kwargs: {}
  generate_kwargs: {}
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
experiment_name: pytorch+cuda+float16+awq-4bit+gemm
device: cuda
model: bigcode/starcoderbase-1b
task: ${infer_task:${model}}
library: ${infer_library:${model}}
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
hydra:
  run:
    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
          - %(message)s'
        log_colors:
          DEBUG: purple
          INFO: green
          WARNING: yellow
          ERROR: red
          CRITICAL: red
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=RUN
    task:
    - model=bigcode/starcoderbase-1b
  job:
    name: cli
    chdir: true
    override_dirname: model=bigcode/starcoderbase-1b
    id: ???
    num: ???
    config_name: pytorch+cuda+float16+awq-4bit+gemm
    env_set:
      COUNTRY_ISO_CODE: FRA
      OVERRIDE_BENCHMARKS: '0'
      CUDA_VISIBLE_DEVICES: '0'
      CUDA_DEVICE_ORDER: PCI_BUS_ID
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /workspace/opt-bench
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: hydra_plugins.hydra_colorlog.conf
      schema: pkg
      provider: hydra-colorlog
    - path: /workspace/opt-bench/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b
    choices:
      launcher: process
      benchmark: inference
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: colorlog
      hydra/hydra_logging: colorlog
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- model=bigcode/starcoderbase-1b
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b/cli.log
ADDED
@@ -0,0 +1,26 @@
[2024-02-14 20:11:51,714][launcher][INFO] - Configuring process launcher
[2024-02-14 20:11:51,715][process][INFO] - Setting multiprocessing start method to spawn.
[2024-02-14 20:11:51,717][process][INFO] - + Launched worker process with PID 2000347.
[2024-02-14 20:11:51,718][isolation][INFO] - + Launched device(s) isolation process 2000348.
[2024-02-14 20:11:58,493][numexpr.utils][INFO] - Note: detected 128 virtual cores but NumExpr set to maximum of 64, check "NUMEXPR_MAX_THREADS" environment variable.
[2024-02-14 20:11:58,494][numexpr.utils][INFO] - Note: NumExpr detected 128 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
[2024-02-14 20:11:58,494][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
[2024-02-14 20:11:58,661][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
[2024-02-14 20:11:58,985][isolation][ERROR] - Found non-permitted process(es) running on system device(s): {1989630}
[2024-02-14 20:11:58,985][isolation][ERROR] - Terminating benchmark process 2000113
[2024-02-14 20:11:59,883][backend][INFO] - Configuring pytorch backend
[2024-02-14 20:11:59,883][pytorch][INFO] - + Inferred class AutoModelForCausalLM for task text-generation and model_type gpt_bigcode
[2024-02-14 20:11:59,883][pytorch][INFO] - + Disabling gradients
[2024-02-14 20:11:59,883][pytorch][INFO] - + Processing quantization config
[2024-02-14 20:11:59,883][pytorch][INFO] - + Processing AWQ config
[2024-02-14 20:12:01,241][pytorch][INFO] - + Loading model with no weights
[2024-02-14 20:12:01,242][pytorch][INFO] - + Creating no weights model directory
[2024-02-14 20:12:01,242][pytorch][INFO] - + Saving pretrained config
[2024-02-14 20:12:01,243][pytorch][INFO] - + Creating no weights model
[2024-02-14 20:12:01,247][pytorch][INFO] - + Saving no weights model
[2024-02-14 20:12:01,247][pytorch][INFO] - + Loading no weights model
[2024-02-14 20:12:01,247][pytorch][INFO] - + Loading quantized model
[2024-02-14 20:12:15,038][experiment][ERROR] - Error during backend configuration: 'WQLinear_GEMM' object has no attribute 'weight'
[2024-02-14 20:12:15,038][backend][INFO] - Cleaning pytorch backend
[2024-02-14 20:12:15,229][pytorch][INFO] - + Cleaning temporary directory
[2024-02-14 20:12:15,229][pytorch][INFO] - + Emptying CUDA cache
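This run failed before any measurement, which is why no result CSVs follow: `WQLinear_GEMM` is AutoAWQ's packed 4-bit linear layer, which stores `qweight`, `qzeros` and `scales` instead of a `weight` attribute, so a step in the no-weights flow that touches `module.weight` raises AttributeError. A toy illustration of the failure mode and a guarded alternative; `PackedLinear` is a made-up stand-in, not the AutoAWQ class:

import torch

class PackedLinear(torch.nn.Module):
    """Toy stand-in for an AWQ GEMM layer: packed buffers, no .weight."""
    def __init__(self):
        super().__init__()
        self.register_buffer("qweight", torch.zeros(8, 8, dtype=torch.int32))
        self.register_buffer("scales", torch.ones(8))

model = torch.nn.Sequential(torch.nn.Linear(8, 8), PackedLinear())

for module in model.modules():
    if hasattr(module, "weight") and module.weight is not None:
        torch.nn.init.normal_(module.weight)  # guarded: skips packed layers
    # An unguarded `module.weight` access here would raise AttributeError
    # on PackedLinear, mirroring the error in the log above.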
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-1b/hydra_config.yaml
ADDED
@@ -0,0 +1,88 @@
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
backend:
  name: pytorch
  version: 2.1.2+cu118
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: float16
  eval_mode: true
  disable_grad: true
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: awq
  quantization_config:
    bits: 4
    version: gemm
    do_fuse: false
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: false
  can_generate: true
  forward_kwargs: {}
  generate_kwargs:
    num_return_sequences: 1
    max_new_tokens: 256
    min_new_tokens: 256
    do_sample: false
    use_cache: true
    pad_token_id: 0
    temperature: 1.0
    num_beams: 1
experiment_name: pytorch+cuda+float16+awq-4bit+gemm
device: cuda
model: bigcode/starcoderbase-1b
task: text-generation
library: transformers
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB
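The `quantization_scheme: awq` block corresponds to the AWQ integration in transformers; presumably the backend builds an AwqConfig from these keys before loading. A hedged sketch, with field names as they appear in transformers 4.37 (the version recorded above):

from transformers import AutoModelForCausalLM, AwqConfig

quantization_config = AwqConfig(bits=4, version="gemm", do_fuse=False)

# Loading with this config requires AWQ-quantized checkpoints (e.g. produced
# with AutoAWQ); shown here only to mirror the YAML keys above.
model = AutoModelForCausalLM.from_pretrained(
    "bigcode/starcoderbase-1b",  # would need an AWQ checkpoint in practice
    quantization_config=quantization_config,
    device_map="cuda",
)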
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b/.hydra/config.yaml
ADDED
@@ -0,0 +1,80 @@
backend:
  name: pytorch
  version: ${pytorch_version:}
  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
  seed: 42
  inter_op_num_threads: null
  intra_op_num_threads: null
  delete_cache: false
  no_weights: true
  device_map: null
  torch_dtype: float16
  eval_mode: ${is_inference:${benchmark.name}}
  disable_grad: ${is_inference:${benchmark.name}}
  amp_autocast: false
  amp_dtype: null
  torch_compile: false
  torch_compile_config: {}
  to_bettertransformer: false
  use_flash_attention_2: false
  quantization_scheme: awq
  quantization_config:
    bits: 4
    version: gemm
    do_fuse: false
  data_parallel: false
  deepspeed_inference: false
  deepspeed_inference_config: {}
  peft_strategy: null
  peft_config: {}
benchmark:
  name: inference
  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
  duration: 10
  warmup_runs: 10
  memory: true
  energy: true
  input_shapes:
    batch_size: 1
    sequence_length: 256
    num_choices: 1
    feature_size: 80
    nb_max_frames: 3000
    audio_sequence_length: 16000
  new_tokens: 256
  can_diffuse: ${can_diffuse:${task}}
  can_generate: ${can_generate:${task}}
  forward_kwargs: {}
  generate_kwargs: {}
launcher:
  name: process
  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
  device_isolation: true
  start_method: spawn
experiment_name: pytorch+cuda+float16+awq-4bit+gemm
device: cuda
model: bigcode/starcoderbase-3b
task: ${infer_task:${model}}
library: ${infer_library:${model}}
hub_kwargs:
  revision: main
  cache_dir: null
  force_download: false
  local_files_only: false
  trust_remote_code: true
environment:
  optimum_version: 1.16.2
  optimum_commit: null
  transformers_version: 4.37.2
  transformers_commit: null
  accelerate_version: 0.27.2
  accelerate_commit: null
  diffusers_version: null
  diffusers_commit: null
  python_version: 3.10.12
  system: Linux
  cpu: ' AMD EPYC 7742 64-Core Processor'
  cpu_count: 128
  cpu_ram_mb: 540671
  gpus:
  - NVIDIA A100-SXM4-80GB
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
hydra:
  run:
    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
  sweep:
    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
      colorlog:
        (): colorlog.ColoredFormatter
        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
          - %(message)s'
        log_colors:
          DEBUG: purple
          INFO: green
          WARNING: yellow
          ERROR: red
          CRITICAL: red
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=RUN
    task:
    - model=bigcode/starcoderbase-3b
  job:
    name: cli
    chdir: true
    override_dirname: model=bigcode/starcoderbase-3b
    id: ???
    num: ???
    config_name: pytorch+cuda+float16+awq-4bit+gemm
    env_set:
      COUNTRY_ISO_CODE: FRA
      OVERRIDE_BENCHMARKS: '0'
      CUDA_VISIBLE_DEVICES: '0'
      CUDA_DEVICE_ORDER: PCI_BUS_ID
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /workspace/opt-bench
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: optimum_benchmark
      schema: pkg
      provider: main
    - path: hydra_plugins.hydra_colorlog.conf
      schema: pkg
      provider: hydra-colorlog
    - path: /workspace/opt-bench/configs
      schema: file
      provider: command-line
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b
    choices:
      launcher: process
      benchmark: inference
      backend: pytorch
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: colorlog
      hydra/hydra_logging: colorlog
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- model=bigcode/starcoderbase-3b
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b/cli.log
ADDED
@@ -0,0 +1,26 @@
+[2024-02-14 20:12:03,843][launcher][INFO] - Configuring process launcher
+[2024-02-14 20:12:03,843][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-02-14 20:12:03,846][process][INFO] - + Launched worker process with PID 2000920.
+[2024-02-14 20:12:03,846][isolation][INFO] - + Launched device(s) isolation process 2000921.
+[2024-02-14 20:12:10,044][isolation][ERROR] - Found non-permitted process(es) running on system device(s): {1989630}
+[2024-02-14 20:12:10,044][isolation][ERROR] - Terminating benchmark process 2000665
+[2024-02-14 20:12:10,643][numexpr.utils][INFO] - Note: detected 128 virtual cores but NumExpr set to maximum of 64, check "NUMEXPR_MAX_THREADS" environment variable.
+[2024-02-14 20:12:10,643][numexpr.utils][INFO] - Note: NumExpr detected 128 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+[2024-02-14 20:12:10,643][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
+[2024-02-14 20:12:10,759][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
+[2024-02-14 20:12:12,453][backend][INFO] - Configuring pytorch backend
+[2024-02-14 20:12:12,453][pytorch][INFO] - + Inferred class AutoModelForCausalLM for task text-generation and model_type gpt_bigcode
+[2024-02-14 20:12:12,453][pytorch][INFO] - + Disabling gradients
+[2024-02-14 20:12:12,453][pytorch][INFO] - + Processing quantization config
+[2024-02-14 20:12:12,453][pytorch][INFO] - + Processing AWQ config
+[2024-02-14 20:12:12,878][pytorch][INFO] - + Loading model with no weights
+[2024-02-14 20:12:12,879][pytorch][INFO] - + Creating no weights model directory
+[2024-02-14 20:12:12,879][pytorch][INFO] - + Saving pretrained config
+[2024-02-14 20:12:12,880][pytorch][INFO] - + Creating no weights model
+[2024-02-14 20:12:12,885][pytorch][INFO] - + Saving no weights model
+[2024-02-14 20:12:12,885][pytorch][INFO] - + Loading no weights model
+[2024-02-14 20:12:12,886][pytorch][INFO] - + Loading quantized model
+[2024-02-14 20:12:33,600][experiment][ERROR] - Error during backend configuration: 'WQLinear_GEMM' object has no attribute 'weight'
+[2024-02-14 20:12:33,600][backend][INFO] - Cleaning pytorch backend
+[2024-02-14 20:12:33,716][pytorch][INFO] - + Cleaning temporary directory
+[2024-02-14 20:12:33,717][pytorch][INFO] - + Emptying CUDA cache
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-3b/hydra_config.yaml
ADDED
@@ -0,0 +1,88 @@
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+backend:
+  name: pytorch
+  version: 2.1.2+cu118
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: true
+  disable_grad: true
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: awq
+  quantization_config:
+    bits: 4
+    version: gemm
+    do_fuse: false
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: false
+  can_generate: true
+  forward_kwargs: {}
+  generate_kwargs:
+    num_return_sequences: 1
+    max_new_tokens: 256
+    min_new_tokens: 256
+    do_sample: false
+    use_cache: true
+    pad_token_id: 0
+    temperature: 1.0
+    num_beams: 1
+experiment_name: pytorch+cuda+float16+awq-4bit+gemm
+device: cuda
+model: bigcode/starcoderbase-3b
+task: text-generation
+library: transformers
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.2
+  optimum_commit: null
+  transformers_version: 4.37.2
+  transformers_commit: null
+  accelerate_version: 0.27.2
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD EPYC 7742 64-Core Processor'
+  cpu_count: 128
+  cpu_ram_mb: 540671
+  gpus:
+  - NVIDIA A100-SXM4-80GB
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b/.hydra/config.yaml
ADDED
@@ -0,0 +1,80 @@
+backend:
+  name: pytorch
+  version: ${pytorch_version:}
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: ${is_inference:${benchmark.name}}
+  disable_grad: ${is_inference:${benchmark.name}}
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: awq
+  quantization_config:
+    bits: 4
+    version: gemm
+    do_fuse: false
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: ${can_diffuse:${task}}
+  can_generate: ${can_generate:${task}}
+  forward_kwargs: {}
+  generate_kwargs: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+experiment_name: pytorch+cuda+float16+awq-4bit+gemm
+device: cuda
+model: bigcode/starcoderbase-7b
+task: ${infer_task:${model}}
+library: ${infer_library:${model}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.2
+  optimum_commit: null
+  transformers_version: 4.37.2
+  transformers_commit: null
+  accelerate_version: 0.27.2
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD EPYC 7742 64-Core Processor'
+  cpu_count: 128
+  cpu_ram_mb: 540671
+  gpus:
+  - NVIDIA A100-SXM4-80GB
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
+hydra:
+  run:
+    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=bigcode/starcoderbase-7b
+  job:
+    name: cli
+    chdir: true
+    override_dirname: model=bigcode/starcoderbase-7b
+    id: ???
+    num: ???
+    config_name: pytorch+cuda+float16+awq-4bit+gemm
+    env_set:
+      COUNTRY_ISO_CODE: FRA
+      OVERRIDE_BENCHMARKS: '0'
+      CUDA_VISIBLE_DEVICES: '0'
+      CUDA_DEVICE_ORDER: PCI_BUS_ID
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /workspace/opt-bench
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /workspace/opt-bench/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b
+    choices:
+      launcher: process
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- model=bigcode/starcoderbase-7b
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b/cli.log
ADDED
@@ -0,0 +1,26 @@
+[2024-02-14 20:12:14,937][launcher][INFO] - Configuring process launcher
+[2024-02-14 20:12:14,937][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-02-14 20:12:14,939][process][INFO] - + Launched worker process with PID 2001537.
+[2024-02-14 20:12:14,940][isolation][INFO] - + Launched device(s) isolation process 2001538.
+[2024-02-14 20:12:21,080][isolation][ERROR] - Found non-permitted process(es) running on system device(s): {1989630}
+[2024-02-14 20:12:21,080][isolation][ERROR] - Terminating benchmark process 2001272
+[2024-02-14 20:12:21,313][numexpr.utils][INFO] - Note: detected 128 virtual cores but NumExpr set to maximum of 64, check "NUMEXPR_MAX_THREADS" environment variable.
+[2024-02-14 20:12:21,313][numexpr.utils][INFO] - Note: NumExpr detected 128 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+[2024-02-14 20:12:21,313][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
+[2024-02-14 20:12:21,428][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
+[2024-02-14 20:12:22,919][backend][INFO] - Configuring pytorch backend
+[2024-02-14 20:12:22,919][pytorch][INFO] - + Inferred class AutoModelForCausalLM for task text-generation and model_type gpt_bigcode
+[2024-02-14 20:12:22,919][pytorch][INFO] - + Disabling gradients
+[2024-02-14 20:12:22,919][pytorch][INFO] - + Processing quantization config
+[2024-02-14 20:12:22,919][pytorch][INFO] - + Processing AWQ config
+[2024-02-14 20:12:22,948][pytorch][INFO] - + Loading model with no weights
+[2024-02-14 20:12:22,949][pytorch][INFO] - + Creating no weights model directory
+[2024-02-14 20:12:22,949][pytorch][INFO] - + Saving pretrained config
+[2024-02-14 20:12:22,951][pytorch][INFO] - + Creating no weights model
+[2024-02-14 20:12:22,955][pytorch][INFO] - + Saving no weights model
+[2024-02-14 20:12:22,956][pytorch][INFO] - + Loading no weights model
+[2024-02-14 20:12:22,956][pytorch][INFO] - + Loading quantized model
+[2024-02-14 20:12:53,845][experiment][ERROR] - Error during backend configuration: 'WQLinear_GEMM' object has no attribute 'weight'
+[2024-02-14 20:12:53,845][backend][INFO] - Cleaning pytorch backend
+[2024-02-14 20:12:53,965][pytorch][INFO] - + Cleaning temporary directory
+[2024-02-14 20:12:53,966][pytorch][INFO] - + Emptying CUDA cache
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase-7b/hydra_config.yaml
ADDED
@@ -0,0 +1,88 @@
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+backend:
+  name: pytorch
+  version: 2.1.2+cu118
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: true
+  disable_grad: true
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: awq
+  quantization_config:
+    bits: 4
+    version: gemm
+    do_fuse: false
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: false
+  can_generate: true
+  forward_kwargs: {}
+  generate_kwargs:
+    num_return_sequences: 1
+    max_new_tokens: 256
+    min_new_tokens: 256
+    do_sample: false
+    use_cache: true
+    pad_token_id: 0
+    temperature: 1.0
+    num_beams: 1
+experiment_name: pytorch+cuda+float16+awq-4bit+gemm
+device: cuda
+model: bigcode/starcoderbase-7b
+task: text-generation
+library: transformers
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.2
+  optimum_commit: null
+  transformers_version: 4.37.2
+  transformers_commit: null
+  accelerate_version: 0.27.2
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD EPYC 7742 64-Core Processor'
+  cpu_count: 128
+  cpu_ram_mb: 540671
+  gpus:
+  - NVIDIA A100-SXM4-80GB
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase/.hydra/config.yaml
ADDED
@@ -0,0 +1,80 @@
+backend:
+  name: pytorch
+  version: ${pytorch_version:}
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: ${is_inference:${benchmark.name}}
+  disable_grad: ${is_inference:${benchmark.name}}
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: awq
+  quantization_config:
+    bits: 4
+    version: gemm
+    do_fuse: false
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: ${can_diffuse:${task}}
+  can_generate: ${can_generate:${task}}
+  forward_kwargs: {}
+  generate_kwargs: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+experiment_name: pytorch+cuda+float16+awq-4bit+gemm
+device: cuda
+model: bigcode/starcoderbase
+task: ${infer_task:${model}}
+library: ${infer_library:${model}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.2
+  optimum_commit: null
+  transformers_version: 4.37.2
+  transformers_commit: null
+  accelerate_version: 0.27.2
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD EPYC 7742 64-Core Processor'
+  cpu_count: 128
+  cpu_ram_mb: 540671
+  gpus:
+  - NVIDIA A100-SXM4-80GB
hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
+hydra:
+  run:
+    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=bigcode/starcoderbase
+  job:
+    name: cli
+    chdir: true
+    override_dirname: model=bigcode/starcoderbase
+    id: ???
+    num: ???
+    config_name: pytorch+cuda+float16+awq-4bit+gemm
+    env_set:
+      COUNTRY_ISO_CODE: FRA
+      OVERRIDE_BENCHMARKS: '0'
+      CUDA_VISIBLE_DEVICES: '0'
+      CUDA_DEVICE_ORDER: PCI_BUS_ID
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /workspace/opt-bench
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /workspace/opt-bench/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/opt-bench/dataset/hf-dgx-01/pytorch+cuda+float16+awq-4bit+gemm/bigcode/starcoderbase
+    choices:
+      launcher: process
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false