Commit a4e32fa
Parent(s): 7c04234

Update dataset
Files changed:
- audace/perf-report.csv +0 -0
- audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/.hydra/config.yaml +77 -0
- audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/cli.log +73 -0
- audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/forward_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/generate_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/hydra_config.yaml +86 -0
- audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/inference_results.csv +2 -0
- audace/pytorch+cuda+float16+bnb-4bit/microsoft/phi-1_5/.hydra/config.yaml +77 -0
- audace/pytorch+cuda+float16+bnb-4bit/microsoft/phi-1_5/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16+bnb-4bit/microsoft/phi-1_5/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16+bnb-4bit/microsoft/phi-1_5/cli.log +4 -0
- audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model/.hydra/config.yaml +77 -0
- audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model/cli.log +26 -0
- audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model/hydra_config.yaml +86 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/.hydra/config.yaml +81 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/cli.log +69 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/forward_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/hydra_config.yaml +89 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/microsoft/phi-1_5/cli.log +72 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/microsoft/phi-1_5/forward_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/microsoft/phi-1_5/generate_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/microsoft/phi-1_5/inference_results.csv +2 -0
- audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/yyjjtt/test-model/cli.log +26 -0
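The per-run directories above follow the layout <machine>/<experiment_name>/<org>/<model>, with each run's headline numbers in inference_results.csv and the aggregate in audace/perf-report.csv. A minimal sketch for collecting the per-run CSVs into one table (a hypothetical helper, not shipped with this dataset; assumes a local checkout under ./audace and that pandas is installed):

    # collect_results.py -- hypothetical, for illustration only
    from pathlib import Path
    import pandas as pd

    frames = []
    for csv_path in Path("audace").rglob("inference_results.csv"):
        df = pd.read_csv(csv_path)            # one row per benchmark run
        parts = csv_path.parts                # ('audace', experiment, org, model, file)
        df.insert(0, "model", "/".join(parts[2:-1]))
        df.insert(0, "experiment", parts[1])
        frames.append(df)
    report = pd.concat(frames, ignore_index=True)
    print(report[["experiment", "model", "generate.throughput(tokens/s)"]])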
audace/perf-report.csv
CHANGED
The diff for this file is too large to render.
audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/.hydra/config.yaml
ADDED
@@ -0,0 +1,77 @@
+backend:
+  name: pytorch
+  version: ${pytorch_version:}
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: ${is_inference:${benchmark.name}}
+  disable_grad: ${is_inference:${benchmark.name}}
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: bnb
+  quantization_config:
+    load_in_4bit: true
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: ${can_diffuse:${task}}
+  can_generate: ${can_generate:${task}}
+  forward_kwargs: {}
+  generate_kwargs: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+experiment_name: pytorch+cuda+float16+bnb-4bit
+device: cuda
+model: TurkuNLP/gpt3-finnish-large
+task: ${infer_task:${model}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.26.1
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
+hydra:
+  run:
+    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=TurkuNLP/gpt3-finnish-large
+  job:
+    name: cli
+    chdir: true
+    override_dirname: model=TurkuNLP/gpt3-finnish-large
+    id: ???
+    num: ???
+    config_name: pytorch+cuda+float16+bnb-4bit
+    env_set:
+      COUNTRY_ISO_CODE: FRA
+      OVERRIDE_BENCHMARKS: '0'
+      CUDA_VISIBLE_DEVICES: '0'
+      CUDA_DEVICE_ORDER: PCI_BUS_ID
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /workspace/llm-perf
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /workspace/llm-perf/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large
+    choices:
+      launcher: process
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- model=TurkuNLP/gpt3-finnish-large
audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/cli.log
ADDED
@@ -0,0 +1,73 @@
+[2024-01-13 11:06:09,194][launcher][INFO] - Configuring process launcher
+[2024-01-13 11:06:09,194][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-01-13 11:06:09,196][process][INFO] - + Launched worker process with PID 681730.
+[2024-01-13 11:06:09,196][isolation][INFO] - + Launched device(s) isolation process 681731.
+[2024-01-13 11:06:11,242][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+[2024-01-13 11:06:11,243][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
+[2024-01-13 11:06:11,382][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
+[2024-01-13 11:06:12,515][backend][WARNING] - Could not find the model's generation config
+[2024-01-13 11:06:12,515][backend][INFO] - Configuring pytorch backend
+[2024-01-13 11:06:12,515][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type bloom
+[2024-01-13 11:06:12,515][pytorch][INFO] - + Disabling gradients
+[2024-01-13 11:06:12,515][pytorch][INFO] - + Processing quantization config
+[2024-01-13 11:06:12,515][pytorch][INFO] - + Processing BitsAndBytes config
+[2024-01-13 11:06:12,516][pytorch][INFO] - + Loading model with no weights
+[2024-01-13 11:06:12,516][pytorch][INFO] - + Creating no weights model directory
+[2024-01-13 11:06:12,516][pytorch][INFO] - + Saving pretrained config
+[2024-01-13 11:06:12,517][pytorch][INFO] - + Creating no weights model
+[2024-01-13 11:06:12,517][pytorch][INFO] - + Saving no weights model
+[2024-01-13 11:06:12,517][pytorch][INFO] - + Loading no weights model
+[2024-01-13 11:06:12,517][pytorch][INFO] - + Loading quantized model
+[2024-01-13 11:06:58,691][pytorch][INFO] - + Turning on model's eval mode
+[2024-01-13 11:06:58,715][benchmark][INFO] - Configuring inference benchmark
+[2024-01-13 11:06:58,715][inference][INFO] - Running inference benchmark
+[2024-01-13 11:06:58,715][inference][INFO] - + Updating input shapes with model shapes
+[2024-01-13 11:06:58,715][inference][INFO] - + Preparing backend for inference
+[2024-01-13 11:06:58,715][inference][INFO] - + Creating input generator
+[2024-01-13 11:06:58,715][input-generator][INFO] - Using text-generation task generator
+[2024-01-13 11:06:58,715][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 11:06:58,715][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:06:58,715][inference][INFO] - + Tracking forward pass peak memory
+[2024-01-13 11:06:58,716][memory][INFO] - Tracking CUDA devices: [0]
+[2024-01-13 11:06:58,716][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-13 11:06:58,872][inference][INFO] - + Forward pass max memory used: 1861 (MB)
+[2024-01-13 11:06:58,872][inference][INFO] - + Forward pass max memory reserved: 1002 (MB)
+[2024-01-13 11:06:58,872][inference][INFO] - + Forward pass max memory allocated: 904 (MB)
+[2024-01-13 11:06:58,872][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 11:06:58,872][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:06:58,872][inference][INFO] - + Tracking generation pass peak memory
+[2024-01-13 11:06:58,872][memory][INFO] - Tracking CUDA devices: [0]
+[2024-01-13 11:06:58,872][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-13 11:07:02,663][inference][INFO] - + Generation pass max memory used: 1972 (MB)
+[2024-01-13 11:07:02,663][inference][INFO] - + Generation pass max memory reserved: 1113 (MB)
+[2024-01-13 11:07:02,663][inference][INFO] - + Generation pass max memory allocated: 941 (MB)
+[2024-01-13 11:07:02,663][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 11:07:02,663][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:07:02,663][inference][INFO] - + Warming up the forward pass
+[2024-01-13 11:07:02,845][inference][INFO] - + Tracking forward pass latency and throughput
+[2024-01-13 11:07:12,869][inference][INFO] - + Forward pass latency: 1.92e-02 (s)
+[2024-01-13 11:07:12,869][inference][INFO] - + Forward pass throughput: 52.10 (samples/s)
+[2024-01-13 11:07:12,869][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 11:07:12,869][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:07:12,869][inference][INFO] - + Warming up the generation pass
+[2024-01-13 11:07:16,642][inference][INFO] - + Tracking generation latency and throughput
+[2024-01-13 11:07:27,851][inference][INFO] - + Generation pass latency: 3.74e+00 (s)
+[2024-01-13 11:07:27,852][inference][INFO] - + Generation pass throughput: 68.40 (tokens/s)
+[2024-01-13 11:07:27,852][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 11:07:27,852][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:07:27,852][inference][INFO] - + Tracking forward pass energy consumption
+[2024-01-13 11:07:42,307][inference][INFO] - + Forward pass energy consumption: 2.13e-06 (kWh/sample)
+[2024-01-13 11:07:42,307][inference][INFO] - + Forward pass carbon emissions: 1.44e-07 (kgCO2eq/sample)
+[2024-01-13 11:07:42,307][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/forward_codecarbon.csv
+[2024-01-13 11:07:42,307][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 11:07:42,307][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:07:42,307][inference][INFO] - + Tracking generation pass energy consumption
+[2024-01-13 11:07:57,670][inference][INFO] - + Generation pass energy consumption: 4.72e-07 (kWh/token)
+[2024-01-13 11:07:57,670][inference][INFO] - + Generation pass carbon emissions: 3.18e-08 (kgCO2eq/token)
+[2024-01-13 11:07:57,670][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/generate_codecarbon.csv
+[2024-01-13 11:07:57,670][inference][INFO] - Saving results
+[2024-01-13 11:07:57,672][backend][INFO] - Cleaning pytorch backend
+[2024-01-13 11:07:57,672][backend][INFO] - + Deleting pretrained model
+[2024-01-13 11:07:57,757][pytorch][INFO] - + Emptying CUDA cache
+[2024-01-13 11:07:57,782][pytorch][INFO] - + Cleaning temporary directory
+[2024-01-13 11:07:58,338][isolation][INFO] - + Closing device(s) isolation process...
audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
+timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+2024-01-13T11:07:42,codecarbon,3759d63d-b8ea-40d4-9ac9-114947c3fbef,10.039524555206299,7.57895859216243e-05,7.549121027083033e-06,42.5,1862.8922841178,0.7758221626281738,0.00011851979212628471,0.0010040163587676787,2.1052784223193537e-06,0.0011246414293162828,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
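The per-sample energy logged in cli.log can be roughly cross-checked against this report: the tracked window lasted about 10.04 s and consumed 1.1246e-03 kWh, and with the measured forward latency of 1.92e-02 s that is roughly 10.04 / 0.0192 ≈ 523 forward passes, i.e. 1.1246e-03 / 523 ≈ 2.15e-06 kWh per sample, consistent with the logged 2.13e-06 kWh/sample. A minimal sketch of that arithmetic (assumes pandas and both CSVs from this run directory):

    import pandas as pd

    cc = pd.read_csv("forward_codecarbon.csv").iloc[0]
    res = pd.read_csv("inference_results.csv").iloc[0]
    n_samples = cc["duration"] / res["forward.latency(s)"]   # ~523 forward passes tracked
    print(cc["energy_consumed"] / n_samples)                 # ~2.15e-06 kWh/sample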
audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
+timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+2024-01-13T11:07:57,codecarbon,5844e862-d9ff-456d-9b30-d8af40aa8ab5,10.926108837127686,2.4409756102474223e-05,2.2340758696754105e-06,42.5,76.2773413421425,0.7761855125427246,0.00012898654722505145,0.0002309362958601291,2.2934516540126046e-06,0.00036221629473919307,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/hydra_config.yaml
ADDED
@@ -0,0 +1,86 @@
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+backend:
+  name: pytorch
+  version: 2.1.2+cu118
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: true
+  disable_grad: true
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: bnb
+  quantization_config:
+    llm_int8_threshold: 0.0
+    load_in_4bit: true
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: false
+  can_generate: true
+  forward_kwargs: {}
+  generate_kwargs:
+    num_return_sequences: 1
+    max_new_tokens: 256
+    min_new_tokens: 256
+    do_sample: false
+    use_cache: true
+    pad_token_id: 0
+    temperature: 1.0
+    num_beams: 1
+experiment_name: pytorch+cuda+float16+bnb-4bit
+device: cuda
+model: TurkuNLP/gpt3-finnish-large
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.26.1
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16+bnb-4bit/TurkuNLP/gpt3-finnish-large/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
+forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
+0.0192,52.1,1861,1861,904,1002,2.13e-06,1.44e-07,3.74,68.4,3.72,68.5,1972,1972,941,1113,4.72e-07,3.18e-08
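The generation columns are internally consistent: with new_tokens: 256 and a measured generation latency of 3.74 s, 256 / 3.74 ≈ 68.4 tokens/s, which matches generate.throughput(tokens/s). A one-line check (assumes pandas):

    import pandas as pd

    row = pd.read_csv("inference_results.csv").iloc[0]
    print(256 / row["generate.latency(s)"])   # ≈ 68.4, matches generate.throughput(tokens/s)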
audace/pytorch+cuda+float16+bnb-4bit/microsoft/phi-1_5/.hydra/config.yaml
ADDED
@@ -0,0 +1,77 @@
+backend:
+  name: pytorch
+  version: ${pytorch_version:}
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: ${is_inference:${benchmark.name}}
+  disable_grad: ${is_inference:${benchmark.name}}
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: bnb
+  quantization_config:
+    load_in_4bit: true
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: ${can_diffuse:${task}}
+  can_generate: ${can_generate:${task}}
+  forward_kwargs: {}
+  generate_kwargs: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+experiment_name: pytorch+cuda+float16+bnb-4bit
+device: cuda
+model: microsoft/phi-1_5
+task: ${infer_task:${model}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.26.1
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16+bnb-4bit/microsoft/phi-1_5/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
+hydra:
+  run:
+    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=microsoft/phi-1_5
+  job:
+    name: cli
+    chdir: true
+    override_dirname: model=microsoft/phi-1_5
+    id: ???
+    num: ???
+    config_name: pytorch+cuda+float16+bnb-4bit
+    env_set:
+      COUNTRY_ISO_CODE: FRA
+      OVERRIDE_BENCHMARKS: '0'
+      CUDA_VISIBLE_DEVICES: '0'
+      CUDA_DEVICE_ORDER: PCI_BUS_ID
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /workspace/llm-perf
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /workspace/llm-perf/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+bnb-4bit/microsoft/phi-1_5
+    choices:
+      launcher: process
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
audace/pytorch+cuda+float16+bnb-4bit/microsoft/phi-1_5/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- model=microsoft/phi-1_5
audace/pytorch+cuda+float16+bnb-4bit/microsoft/phi-1_5/cli.log
ADDED
@@ -0,0 +1,4 @@
+[2024-01-13 11:08:00,092][launcher][INFO] - Configuring process launcher
+[2024-01-13 11:08:00,092][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-01-13 11:08:00,094][process][INFO] - + Launched worker process with PID 685286.
+[2024-01-13 11:08:00,094][isolation][INFO] - + Launched device(s) isolation process 685287.
audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model/.hydra/config.yaml
ADDED
@@ -0,0 +1,77 @@
+backend:
+  name: pytorch
+  version: ${pytorch_version:}
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: ${is_inference:${benchmark.name}}
+  disable_grad: ${is_inference:${benchmark.name}}
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: bnb
+  quantization_config:
+    load_in_4bit: true
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: ${can_diffuse:${task}}
+  can_generate: ${can_generate:${task}}
+  forward_kwargs: {}
+  generate_kwargs: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+experiment_name: pytorch+cuda+float16+bnb-4bit
+device: cuda
+model: yyjjtt/test-model
+task: ${infer_task:${model}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.26.1
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
+hydra:
+  run:
+    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=yyjjtt/test-model
+  job:
+    name: cli
+    chdir: true
+    override_dirname: model=yyjjtt/test-model
+    id: ???
+    num: ???
+    config_name: pytorch+cuda+float16+bnb-4bit
+    env_set:
+      COUNTRY_ISO_CODE: FRA
+      OVERRIDE_BENCHMARKS: '0'
+      CUDA_VISIBLE_DEVICES: '0'
+      CUDA_DEVICE_ORDER: PCI_BUS_ID
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /workspace/llm-perf
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /workspace/llm-perf/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model
+    choices:
+      launcher: process
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- model=yyjjtt/test-model
audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model/cli.log
ADDED
@@ -0,0 +1,26 @@
+[2024-01-13 11:06:03,763][launcher][INFO] - Configuring process launcher
+[2024-01-13 11:06:03,764][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-01-13 11:06:03,765][process][INFO] - + Launched worker process with PID 681420.
+[2024-01-13 11:06:03,765][isolation][INFO] - + Launched device(s) isolation process 681421.
+[2024-01-13 11:06:05,594][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+[2024-01-13 11:06:05,594][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
+[2024-01-13 11:06:05,758][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
+[2024-01-13 11:06:07,006][backend][INFO] - Configuring pytorch backend
+[2024-01-13 11:06:07,006][pytorch][INFO] - + Inferred AutoModel class AutoModelForSeq2SeqLM for task text2text-generation and model_type llama
+[2024-01-13 11:06:07,006][pytorch][INFO] - + Disabling gradients
+[2024-01-13 11:06:07,006][pytorch][INFO] - + Processing quantization config
+[2024-01-13 11:06:07,007][pytorch][INFO] - + Processing BitsAndBytes config
+[2024-01-13 11:06:07,007][pytorch][INFO] - + Loading model with no weights
+[2024-01-13 11:06:07,007][pytorch][INFO] - + Creating no weights model directory
+[2024-01-13 11:06:07,007][pytorch][INFO] - + Saving pretrained config
+[2024-01-13 11:06:07,008][pytorch][INFO] - + Creating no weights model
+[2024-01-13 11:06:07,028][pytorch][INFO] - + Saving no weights model
+[2024-01-13 11:06:07,029][pytorch][INFO] - + Loading no weights model
+[2024-01-13 11:06:07,029][pytorch][INFO] - + Loading quantized model
+[2024-01-13 11:06:07,035][experiment][ERROR] - Error during backend configuration: Unrecognized configuration class <class 'transformers.models.llama.configuration_llama.LlamaConfig'> for this kind of AutoModel: AutoModelForSeq2SeqLM.
+Model type should be one of BartConfig, BigBirdPegasusConfig, BlenderbotConfig, BlenderbotSmallConfig, EncoderDecoderConfig, FSMTConfig, GPTSanJapaneseConfig, LEDConfig, LongT5Config, M2M100Config, MarianConfig, MBartConfig, MT5Config, MvpConfig, NllbMoeConfig, PegasusConfig, PegasusXConfig, PLBartConfig, ProphetNetConfig, SeamlessM4TConfig, SeamlessM4Tv2Config, SwitchTransformersConfig, T5Config, UMT5Config, XLMProphetNetConfig.
+[2024-01-13 11:06:07,035][backend][INFO] - Cleaning pytorch backend
+[2024-01-13 11:06:07,035][pytorch][INFO] - + Emptying CUDA cache
+[2024-01-13 11:06:07,035][pytorch][INFO] - + Cleaning temporary directory
+[2024-01-13 11:06:07,475][isolation][INFO] - + Closing device(s) isolation process...
+[2024-01-13 11:06:07,484][process][ERROR] - Worker process exited with code 1, forwarding...
audace/pytorch+cuda+float16+bnb-4bit/yyjjtt/test-model/hydra_config.yaml
ADDED
@@ -0,0 +1,86 @@
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+backend:
+  name: pytorch
+  version: 2.1.2+cu118
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: true
+  disable_grad: true
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: bnb
+  quantization_config:
+    llm_int8_threshold: 0.0
+    load_in_4bit: true
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: false
+  can_generate: true
+  forward_kwargs: {}
+  generate_kwargs:
+    num_return_sequences: 1
+    max_new_tokens: 256
+    min_new_tokens: 256
+    do_sample: false
+    use_cache: true
+    pad_token_id: 0
+    temperature: 1.0
+    num_beams: 1
+experiment_name: pytorch+cuda+float16+bnb-4bit
+device: cuda
+model: yyjjtt/test-model
+task: text2text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.26.1
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/.hydra/config.yaml
ADDED
@@ -0,0 +1,81 @@
+backend:
+  name: pytorch
+  version: ${pytorch_version:}
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: ${is_inference:${benchmark.name}}
+  disable_grad: ${is_inference:${benchmark.name}}
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: gptq
+  quantization_config:
+    bits: 4
+    use_cuda_fp16: false
+    use_exllama: true
+    exllama_config:
+      version: 1
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: ${can_diffuse:${task}}
+  can_generate: ${can_generate:${task}}
+  forward_kwargs: {}
+  generate_kwargs: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+experiment_name: pytorch+cuda+float16+gptq-4bit+exllama-v1
+device: cuda
+model: cyberagent/open-calm-large
+task: ${infer_task:${model}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.26.1
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
|
4 |
+
sweep:
|
5 |
+
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
|
6 |
+
subdir: ${hydra.job.num}
|
7 |
+
launcher:
|
8 |
+
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
|
9 |
+
sweeper:
|
10 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
11 |
+
max_batch_size: null
|
12 |
+
params: null
|
13 |
+
help:
|
14 |
+
app_name: ${hydra.job.name}
|
15 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
16 |
+
|
17 |
+
'
|
18 |
+
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=cyberagent/open-calm-large
+  job:
+    name: cli
+    chdir: true
+    override_dirname: model=cyberagent/open-calm-large
+    id: ???
+    num: ???
+    config_name: pytorch+cuda+float16+gptq-4bit+exllama-v1
+    env_set:
+      COUNTRY_ISO_CODE: FRA
+      OVERRIDE_BENCHMARKS: '0'
+      CUDA_VISIBLE_DEVICES: '0'
+      CUDA_DEVICE_ORDER: PCI_BUS_ID
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /workspace/llm-perf
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /workspace/llm-perf/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large
+    choices:
+      launcher: process
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
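Note on the `job_logging` block above: it is a standard `logging.config.dictConfig` dictionary, which explains the `[timestamp][logger][LEVEL] - message` shape of every `cli.log` line in this dataset. A minimal, illustrative sketch of applying the same configuration outside Hydra (assumes the `colorlog` package is installed; the hard-coded `cli.log` stands in for `${hydra.job.name}.log`, which resolves to `cli` in these runs):

```python
import logging
import logging.config

LOGGING_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "simple": {"format": "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"},
        "colorlog": {
            "()": "colorlog.ColoredFormatter",  # "()" is dictConfig's custom-factory key
            "format": "[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s]"
                      "[%(log_color)s%(levelname)s%(reset)s] - %(message)s",
            "log_colors": {"DEBUG": "purple", "INFO": "green", "WARNING": "yellow",
                           "ERROR": "red", "CRITICAL": "red"},
        },
    },
    "handlers": {
        "console": {"class": "logging.StreamHandler", "formatter": "colorlog",
                    "stream": "ext://sys.stdout"},
        "file": {"class": "logging.FileHandler", "formatter": "simple",
                 "filename": "cli.log"},  # stand-in for ${hydra.job.name}.log
    },
    "root": {"level": "INFO", "handlers": ["console", "file"]},
}

logging.config.dictConfig(LOGGING_CONFIG)
logging.getLogger("inference").info("Running inference benchmark")
```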
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- model=cyberagent/open-calm-large
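This single recorded override, combined with the `config_name` in `hydra.yaml`, is enough to recompose the run's configuration. A hedged sketch with Hydra's compose API (the config directory is taken from `config_sources` above; optimum-benchmark's custom resolvers such as `${pytorch_version:}` are assumed to be registered by importing the package):

```python
from hydra import compose, initialize_config_dir

import optimum_benchmark  # noqa: F401  (assumed to register custom OmegaConf resolvers)

with initialize_config_dir(config_dir="/workspace/llm-perf/configs", version_base="1.3"):
    cfg = compose(
        config_name="pytorch+cuda+float16+gptq-4bit+exllama-v1",
        overrides=["model=cyberagent/open-calm-large"],
    )

print(cfg.model)  # cyberagent/open-calm-large
```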
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/cli.log
ADDED
@@ -0,0 +1,69 @@
+[2024-01-13 11:07:09,699][launcher][INFO] - Configuring process launcher
+[2024-01-13 11:07:09,699][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-01-13 11:07:09,701][process][INFO] - + Launched worker process with PID 683591.
+[2024-01-13 11:07:09,701][isolation][INFO] - + Launched device(s) isolation process 683592.
+[2024-01-13 11:07:11,679][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+[2024-01-13 11:07:11,679][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
+[2024-01-13 11:07:11,823][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
+[2024-01-13 11:07:12,857][backend][INFO] - Configuring pytorch backend
+[2024-01-13 11:07:12,857][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt_neox
+[2024-01-13 11:07:12,858][pytorch][INFO] - + Disabling gradients
+[2024-01-13 11:07:12,858][pytorch][INFO] - + Processing quantization config
+[2024-01-13 11:07:12,858][pytorch][INFO] - + Processing GPTQ config
+[2024-01-13 11:07:12,858][pytorch][INFO] - + Loading model with no weights
+[2024-01-13 11:07:12,858][pytorch][INFO] - + Creating no weights model directory
+[2024-01-13 11:07:12,858][pytorch][INFO] - + Saving pretrained config
+[2024-01-13 11:07:12,859][pytorch][INFO] - + Creating no weights model
+[2024-01-13 11:07:12,859][pytorch][INFO] - + Saving no weights model
+[2024-01-13 11:07:12,860][pytorch][INFO] - + Loading no weights model
+[2024-01-13 11:07:12,860][pytorch][INFO] - + Loading quantized model
+[2024-01-13 11:07:20,776][pytorch][INFO] - + Turning on model's eval mode
+[2024-01-13 11:07:20,799][benchmark][INFO] - Configuring inference benchmark
+[2024-01-13 11:07:20,799][inference][INFO] - Running inference benchmark
+[2024-01-13 11:07:20,799][inference][INFO] - + Updating input shapes with model shapes
+[2024-01-13 11:07:20,799][inference][INFO] - + Preparing backend for inference
+[2024-01-13 11:07:20,799][inference][INFO] - + Creating input generator
+[2024-01-13 11:07:20,799][input-generator][INFO] - Using text-generation task generator
+[2024-01-13 11:07:20,799][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 11:07:20,799][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:07:20,800][inference][INFO] - + Tracking forward pass peak memory
+[2024-01-13 11:07:20,800][memory][INFO] - Tracking CUDA devices: [0]
+[2024-01-13 11:07:20,800][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-13 11:07:20,994][inference][INFO] - + Forward pass max memory used: 1821 (MB)
+[2024-01-13 11:07:20,994][inference][INFO] - + Forward pass max memory reserved: 960 (MB)
+[2024-01-13 11:07:20,994][inference][INFO] - + Forward pass max memory allocated: 933 (MB)
+[2024-01-13 11:07:20,994][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 11:07:20,994][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:07:20,995][inference][INFO] - + Tracking generation pass peak memory
+[2024-01-13 11:07:20,995][memory][INFO] - Tracking CUDA devices: [0]
+[2024-01-13 11:07:20,995][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-13 11:07:23,195][inference][INFO] - + Generation pass max memory used: 1978 (MB)
+[2024-01-13 11:07:23,195][inference][INFO] - + Generation pass max memory reserved: 1117 (MB)
+[2024-01-13 11:07:23,195][inference][INFO] - + Generation pass max memory allocated: 983 (MB)
+[2024-01-13 11:07:23,195][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 11:07:23,195][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:07:23,195][inference][INFO] - + Warming up the forward pass
+[2024-01-13 11:07:23,282][inference][INFO] - + Tracking forward pass latency and throughput
+[2024-01-13 11:07:33,315][inference][INFO] - + Forward pass latency: 8.90e-03 (s)
+[2024-01-13 11:07:33,316][inference][INFO] - + Forward pass throughput: 112.00 (samples/s)
+[2024-01-13 11:07:33,316][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 11:07:33,316][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:07:33,316][inference][INFO] - + Warming up the generation pass
+[2024-01-13 11:07:35,340][inference][INFO] - + Tracking generation latency and throughput
+[2024-01-13 11:07:45,358][inference][INFO] - + Generation pass latency: 2.00e+00 (s)
+[2024-01-13 11:07:45,358][inference][INFO] - + Generation pass throughput: 128.00 (tokens/s)
+[2024-01-13 11:07:45,358][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 11:07:45,358][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:07:45,358][inference][INFO] - + Tracking forward pass energy consumption
+[2024-01-13 11:07:59,905][inference][INFO] - + Forward pass energy consumption: 5.17e-07 (kWh/sample)
+[2024-01-13 11:07:59,905][inference][INFO] - + Forward pass carbon emissions: 3.48e-08 (kgCO2eq/sample)
+[2024-01-13 11:07:59,905][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/forward_codecarbon.csv
+[2024-01-13 11:07:59,906][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 11:07:59,906][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:07:59,906][inference][INFO] - + Tracking generation pass energy consumption
+[2024-01-13 11:08:14,585][inference][INFO] - + Generation pass energy consumption: 2.88e-07 (kWh/token)
+[2024-01-13 11:08:14,585][inference][INFO] - + Generation pass carbon emissions: 1.94e-08 (kgCO2eq/token)
+[2024-01-13 11:08:14,585][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/generate_codecarbon.csv
+[2024-01-13 11:08:14,585][inference][INFO] - Saving results
+[2024-01-13 11:08:14,587][backend][INFO] - Cleaning pytorch backend
+[2024-01-13 11:08:14,588][backend][INFO] - + Deleting pretrained model
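The throughput figures in this log are consistent with the measured latencies and the input shapes recorded in `hydra_config.yaml` below (illustrative arithmetic, not benchmark code):

```python
# Values copied from the log above; batch_size and new_tokens from hydra_config.yaml.
forward_latency_s = 8.90e-03
generation_latency_s = 2.00e+00
batch_size = 1
new_tokens = 256

print(batch_size / forward_latency_s)                   # ~112.4 -> reported 112.00 samples/s
print(batch_size * new_tokens / generation_latency_s)   # 128.0  -> reported 128.00 tokens/s
```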
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
+timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+2024-01-13T11:07:59,codecarbon,c56bebb2-6f5b-431c-8b80-26e3cd645f7c,10.061407089233398,3.7831521891843866e-05,3.7600627383745325e-06,42.5,991.1384232323476,0.5320930480957031,0.00011877782079908585,0.0004411620195958754,1.4419950679243244e-06,0.0005613818354628856,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
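A quick, illustrative consistency check on the CodeCarbon row above: `energy_consumed` is the sum of the `cpu_energy`, `gpu_energy` and `ram_energy` columns (all in kWh), and dividing `emissions` by it recovers the effective grid carbon intensity applied for `FRA`:

```python
cpu_energy = 0.00011877782079908585     # kWh
gpu_energy = 0.0004411620195958754      # kWh
ram_energy = 1.4419950679243244e-06     # kWh
energy_consumed = 0.0005613818354628856  # kWh
emissions = 3.7831521891843866e-05      # kgCO2eq

assert abs(cpu_energy + gpu_energy + ram_energy - energy_consumed) < 1e-12
print(emissions / energy_consumed)  # ~0.0674 kgCO2eq/kWh
```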
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/cyberagent/open-calm-large/hydra_config.yaml
ADDED
@@ -0,0 +1,89 @@
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+backend:
+  name: pytorch
+  version: 2.1.2+cu118
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: true
+  disable_grad: true
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: gptq
+  quantization_config:
+    bits: 4
+    use_cuda_fp16: false
+    use_exllama: true
+    exllama_config:
+      version: 1
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: false
+  can_generate: true
+  forward_kwargs: {}
+  generate_kwargs:
+    num_return_sequences: 1
+    max_new_tokens: 256
+    min_new_tokens: 256
+    do_sample: false
+    use_cache: true
+    pad_token_id: 0
+    temperature: 1.0
+    num_beams: 1
+experiment_name: pytorch+cuda+float16+gptq-4bit+exllama-v1
+device: cuda
+model: cyberagent/open-calm-large
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.26.1
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
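The `quantization_scheme`/`quantization_config` pair above corresponds to transformers' `GPTQConfig` (transformers 4.36.2 per the `environment` block). A hedged sketch of the equivalent object, which may differ in detail from how optimum-benchmark constructs it internally:

```python
from transformers import GPTQConfig

# Mirrors backend.quantization_config above: 4-bit GPTQ with exllama v1 kernels.
quantization_config = GPTQConfig(
    bits=4,
    use_cuda_fp16=False,
    use_exllama=True,
    exllama_config={"version": 1},
)

print(quantization_config.to_dict())
```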
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/microsoft/phi-1_5/cli.log
CHANGED
@@ -37,3 +37,75 @@
 [2024-01-13 11:04:28,328][inference][INFO] - + Tracking generation pass peak memory
 [2024-01-13 11:04:28,328][memory][INFO] - Tracking CUDA devices: [0]
 [2024-01-13 11:04:28,328][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-13 11:05:56,943][launcher][INFO] - Configuring process launcher
+[2024-01-13 11:05:56,943][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-01-13 11:05:56,945][process][INFO] - + Launched worker process with PID 681010.
+[2024-01-13 11:05:56,945][isolation][INFO] - + Launched device(s) isolation process 681011.
+[2024-01-13 11:05:58,838][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+[2024-01-13 11:05:58,838][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
+[2024-01-13 11:05:58,975][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
+[2024-01-13 11:06:00,579][backend][INFO] - Configuring pytorch backend
+[2024-01-13 11:06:00,579][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type phi
+[2024-01-13 11:06:00,579][pytorch][INFO] - + Disabling gradients
+[2024-01-13 11:06:00,579][pytorch][INFO] - + Processing quantization config
+[2024-01-13 11:06:00,579][pytorch][INFO] - + Processing GPTQ config
+[2024-01-13 11:06:00,579][pytorch][INFO] - + Loading model with no weights
+[2024-01-13 11:06:00,580][pytorch][INFO] - + Creating no weights model directory
+[2024-01-13 11:06:00,580][pytorch][INFO] - + Saving pretrained config
+[2024-01-13 11:06:00,580][pytorch][INFO] - + Creating no weights model
+[2024-01-13 11:06:00,581][pytorch][INFO] - + Saving no weights model
+[2024-01-13 11:06:00,581][pytorch][INFO] - + Loading no weights model
+[2024-01-13 11:06:00,581][pytorch][INFO] - + Loading quantized model
+[2024-01-13 11:06:11,427][pytorch][INFO] - + Turning on model's eval mode
+[2024-01-13 11:06:11,451][benchmark][INFO] - Configuring inference benchmark
+[2024-01-13 11:06:11,451][inference][INFO] - Running inference benchmark
+[2024-01-13 11:06:11,451][inference][INFO] - + Updating input shapes with model shapes
+[2024-01-13 11:06:11,451][inference][INFO] - + Preparing backend for inference
+[2024-01-13 11:06:11,451][inference][INFO] - + Creating input generator
+[2024-01-13 11:06:11,451][input-generator][INFO] - Using text-generation task generator
+[2024-01-13 11:06:11,451][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 11:06:11,451][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:06:11,451][inference][INFO] - + Tracking forward pass peak memory
+[2024-01-13 11:06:11,451][memory][INFO] - Tracking CUDA devices: [0]
+[2024-01-13 11:06:11,451][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-13 11:06:11,614][inference][INFO] - + Forward pass max memory used: 2137 (MB)
+[2024-01-13 11:06:11,614][inference][INFO] - + Forward pass max memory reserved: 1277 (MB)
+[2024-01-13 11:06:11,614][inference][INFO] - + Forward pass max memory allocated: 1229 (MB)
+[2024-01-13 11:06:11,614][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 11:06:11,614][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:06:11,614][inference][INFO] - + Tracking generation pass peak memory
+[2024-01-13 11:06:11,614][memory][INFO] - Tracking CUDA devices: [0]
+[2024-01-13 11:06:11,614][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-13 11:06:14,034][inference][INFO] - + Generation pass max memory used: 2263 (MB)
+[2024-01-13 11:06:14,034][inference][INFO] - + Generation pass max memory reserved: 1402 (MB)
+[2024-01-13 11:06:14,034][inference][INFO] - + Generation pass max memory allocated: 1304 (MB)
+[2024-01-13 11:06:14,034][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 11:06:14,034][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:06:14,034][inference][INFO] - + Warming up the forward pass
+[2024-01-13 11:06:14,130][inference][INFO] - + Tracking forward pass latency and throughput
+[2024-01-13 11:06:24,158][inference][INFO] - + Forward pass latency: 9.86e-03 (s)
+[2024-01-13 11:06:24,158][inference][INFO] - + Forward pass throughput: 101.00 (samples/s)
+[2024-01-13 11:06:24,167][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 11:06:24,167][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:06:24,167][inference][INFO] - + Warming up the generation pass
+[2024-01-13 11:06:26,358][inference][INFO] - + Tracking generation latency and throughput
+[2024-01-13 11:06:37,364][inference][INFO] - + Generation pass latency: 2.20e+00 (s)
+[2024-01-13 11:06:37,365][inference][INFO] - + Generation pass throughput: 116.00 (tokens/s)
+[2024-01-13 11:06:37,365][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 11:06:37,365][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:06:37,365][inference][INFO] - + Tracking forward pass energy consumption
+[2024-01-13 11:06:51,836][inference][INFO] - + Forward pass energy consumption: 7.4e-07 (kWh/sample)
+[2024-01-13 11:06:51,836][inference][INFO] - + Forward pass carbon emissions: 4.99e-08 (kgCO2eq/sample)
+[2024-01-13 11:06:51,836][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/microsoft/phi-1_5/forward_codecarbon.csv
+[2024-01-13 11:06:51,836][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 11:06:51,836][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 11:06:51,836][inference][INFO] - + Tracking generation pass energy consumption
+[2024-01-13 11:07:07,256][inference][INFO] - + Generation pass energy consumption: 3.43e-07 (kWh/token)
+[2024-01-13 11:07:07,256][inference][INFO] - + Generation pass carbon emissions: 2.31e-08 (kgCO2eq/token)
+[2024-01-13 11:07:07,256][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/microsoft/phi-1_5/generate_codecarbon.csv
+[2024-01-13 11:07:07,256][inference][INFO] - Saving results
+[2024-01-13 11:07:07,259][backend][INFO] - Cleaning pytorch backend
+[2024-01-13 11:07:07,259][backend][INFO] - + Deleting pretrained model
+[2024-01-13 11:07:07,348][pytorch][INFO] - + Emptying CUDA cache
+[2024-01-13 11:07:07,376][pytorch][INFO] - + Cleaning temporary directory
+[2024-01-13 11:07:07,951][isolation][INFO] - + Closing device(s) isolation process...
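On the memory lines above: `max memory allocated` <= `max memory reserved` come from PyTorch's CUDA allocator statistics, while `max memory used` is larger because it is presumably tracked device-wide (CUDA context included, e.g. via NVML). An illustrative sketch of reading the allocator-side numbers, not optimum-benchmark's actual tracker:

```python
import torch

torch.cuda.reset_peak_memory_stats()
# ... run the forward or generation pass under measurement here ...
print(f"max allocated: {torch.cuda.max_memory_allocated() / 1e6:.0f} MB")
print(f"max reserved:  {torch.cuda.max_memory_reserved() / 1e6:.0f} MB")
```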
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/microsoft/phi-1_5/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
+timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+2024-01-13T11:06:51,codecarbon,a4bf8c25-d96a-4577-83c2-b2fb345ff933,10.038506746292114,5.249870332200214e-05,5.2297323345818725e-06,42.5,1500.501394364018,0.7521185874938965,0.0001185076665547159,0.0006584880267901383,2.0324016544354127e-06,0.0007790280949992897,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/microsoft/phi-1_5/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
+timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+2024-01-13T11:07:07,codecarbon,0e2740ed-cd10-4099-be64-351294f1de62,10.990113258361816,2.9607247093812628e-05,2.693989260873721e-06,42.5,103.8406252240754,0.752471923828125,0.0001297416314482689,0.0003073641347799416,2.2360811350871283e-06,0.0004393418473632976,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/microsoft/phi-1_5/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
+forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
+0.00986,101.0,2137,2137,1229,1277,7.4e-07,4.99e-08,2.2,116.0,2.19,116.0,2263,2263,1304,1402,3.43e-07,2.31e-08
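Reading the results row back (illustrative): the `decode.*` columns appear to factor the first (prefill) forward pass out of the full generation pass, since `generate.latency - forward.latency` reproduces `decode.latency`:

```python
import pandas as pd

row = pd.read_csv("inference_results.csv").iloc[0]
decode_latency = row["generate.latency(s)"] - row["forward.latency(s)"]
print(round(decode_latency, 2))  # 2.19, matching decode.latency(s)
```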
audace/pytorch+cuda+float16+gptq-4bit+exllama-v1/yyjjtt/test-model/cli.log
CHANGED
@@ -24,3 +24,29 @@ Model type should be one of BartConfig, BigBirdPegasusConfig, BlenderbotConfig,
 [2024-01-13 11:02:50,875][pytorch][INFO] - + Cleaning temporary directory
 [2024-01-13 11:02:51,375][isolation][INFO] - + Closing device(s) isolation process...
 [2024-01-13 11:02:51,387][process][ERROR] - Worker process exited with code 1, forwarding...
+[2024-01-13 11:05:51,517][launcher][INFO] - Configuring process launcher
+[2024-01-13 11:05:51,517][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-01-13 11:05:51,519][process][INFO] - + Launched worker process with PID 680722.
+[2024-01-13 11:05:51,519][isolation][INFO] - + Launched device(s) isolation process 680723.
+[2024-01-13 11:05:53,329][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+[2024-01-13 11:05:53,329][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
+[2024-01-13 11:05:53,465][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
+[2024-01-13 11:05:54,702][backend][INFO] - Configuring pytorch backend
+[2024-01-13 11:05:54,702][pytorch][INFO] - + Inferred AutoModel class AutoModelForSeq2SeqLM for task text2text-generation and model_type llama
+[2024-01-13 11:05:54,702][pytorch][INFO] - + Disabling gradients
+[2024-01-13 11:05:54,702][pytorch][INFO] - + Processing quantization config
+[2024-01-13 11:05:54,702][pytorch][INFO] - + Processing GPTQ config
+[2024-01-13 11:05:54,702][pytorch][INFO] - + Loading model with no weights
+[2024-01-13 11:05:54,703][pytorch][INFO] - + Creating no weights model directory
+[2024-01-13 11:05:54,703][pytorch][INFO] - + Saving pretrained config
+[2024-01-13 11:05:54,703][pytorch][INFO] - + Creating no weights model
+[2024-01-13 11:05:54,704][pytorch][INFO] - + Saving no weights model
+[2024-01-13 11:05:54,704][pytorch][INFO] - + Loading no weights model
+[2024-01-13 11:05:54,704][pytorch][INFO] - + Loading quantized model
+[2024-01-13 11:05:54,710][experiment][ERROR] - Error during backend configuration: Unrecognized configuration class <class 'transformers.models.llama.configuration_llama.LlamaConfig'> for this kind of AutoModel: AutoModelForSeq2SeqLM.
+Model type should be one of BartConfig, BigBirdPegasusConfig, BlenderbotConfig, BlenderbotSmallConfig, EncoderDecoderConfig, FSMTConfig, GPTSanJapaneseConfig, LEDConfig, LongT5Config, M2M100Config, MarianConfig, MBartConfig, MT5Config, MvpConfig, NllbMoeConfig, PegasusConfig, PegasusXConfig, PLBartConfig, ProphetNetConfig, SeamlessM4TConfig, SeamlessM4Tv2Config, SwitchTransformersConfig, T5Config, UMT5Config, XLMProphetNetConfig.
+[2024-01-13 11:05:54,710][backend][INFO] - Cleaning pytorch backend
+[2024-01-13 11:05:54,710][pytorch][INFO] - + Emptying CUDA cache
+[2024-01-13 11:05:54,710][pytorch][INFO] - + Cleaning temporary directory
+[2024-01-13 11:05:55,154][isolation][INFO] - + Closing device(s) isolation process...
+[2024-01-13 11:05:55,171][process][ERROR] - Worker process exited with code 1, forwarding...
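A minimal, illustrative reproduction of the failure above: the hub metadata for this model maps it to `text2text-generation`, so the backend picks `AutoModelForSeq2SeqLM`, which has no entry for `LlamaConfig`:

```python
from transformers import AutoModelForSeq2SeqLM, LlamaConfig

try:
    AutoModelForSeq2SeqLM.from_config(LlamaConfig())
except ValueError as err:
    print(err)  # Unrecognized configuration class ... AutoModelForSeq2SeqLM.
```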