Commit fd5e220
Parent(s): 39ac210
Update dataset

This view is limited to 50 files because it contains too many changes.
- audace/perf-report.csv +0 -0
- audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/.hydra/config.yaml +77 -0
- audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/cli.log +19 -0
- audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/hydra_config.yaml +86 -0
- audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/.hydra/config.yaml +77 -0
- audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/cli.log +76 -0
- audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/forward_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/generate_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/hydra_config.yaml +85 -0
- audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/inference_results.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/.hydra/config.yaml +76 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/cli.log +72 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/forward_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/generate_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/hydra_config.yaml +84 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/inference_results.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/.hydra/config.yaml +76 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/cli.log +72 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/forward_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/generate_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/hydra_config.yaml +84 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/inference_results.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/.hydra/config.yaml +76 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/cli.log +72 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/forward_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/generate_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/hydra_config.yaml +84 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/inference_results.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/.hydra/config.yaml +76 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/cli.log +72 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/forward_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/generate_codecarbon.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/hydra_config.yaml +84 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/inference_results.csv +2 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied/.hydra/config.yaml +76 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied/.hydra/hydra.yaml +176 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied/.hydra/overrides.yaml +1 -0
- audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied/cli.log +72 -0
audace/perf-report.csv
ADDED
The diff for this file is too large to render.
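The aggregated report itself is not rendered here, but every per-run directory below ships an inference_results.csv with a fixed header. A minimal sketch of rebuilding a comparable aggregate with pandas, assuming the audace/<experiment>/<org>/<model>/ layout shown in the file list (the exact perf-report.csv schema is not visible in this view):

import glob
import pandas as pd

# Collect every per-run inference_results.csv under the "audace" machine folder.
records = []
for path in glob.glob("audace/*/*/*/inference_results.csv"):
    _, experiment, org, model, _ = path.split("/")
    df = pd.read_csv(path)
    df.insert(0, "model", f"{org}/{model}")
    df.insert(0, "experiment", experiment)
    records.append(df)

report = pd.concat(records, ignore_index=True)
print(report[["experiment", "model", "generate.throughput(tokens/s)"]])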
audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/.hydra/config.yaml
ADDED
@@ -0,0 +1,77 @@
+backend:
+  name: pytorch
+  version: ${pytorch_version:}
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: ${is_inference:${benchmark.name}}
+  disable_grad: ${is_inference:${benchmark.name}}
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: bnb
+  quantization_config:
+    load_in_4bit: true
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: ${can_diffuse:${task}}
+  can_generate: ${can_generate:${task}}
+  forward_kwargs: {}
+  generate_kwargs: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+experiment_name: pytorch+cuda+float16+bnb-4bit
+device: cuda
+model: TigerResearch/tigerbot-13b-base
+task: ${infer_task:${model}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.26.1
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
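This .hydra/config.yaml is the composed but unresolved experiment config: values such as ${pytorch_version:} and ${is_inference:${benchmark.name}} are OmegaConf interpolations that optimum-benchmark resolves at run time (the resolved values appear in hydra_config.yaml further down). A minimal sketch of inspecting it with PyYAML, using the path from this commit:

import yaml

# Load the composed (but unresolved) experiment config written by Hydra.
path = "audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/.hydra/config.yaml"
with open(path) as f:
    cfg = yaml.safe_load(f)

print(cfg["backend"]["torch_dtype"])          # float16
print(cfg["backend"]["quantization_config"])  # {'load_in_4bit': True}
print(cfg["backend"]["version"])              # '${pytorch_version:}' - resolved only at run time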
audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
+hydra:
+  run:
+    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=TigerResearch/tigerbot-13b-base
+  job:
+    name: cli
+    chdir: true
+    override_dirname: model=TigerResearch/tigerbot-13b-base
+    id: ???
+    num: ???
+    config_name: pytorch+cuda+float16+bnb-4bit
+    env_set:
+      COUNTRY_ISO_CODE: FRA
+      OVERRIDE_BENCHMARKS: '0'
+      CUDA_VISIBLE_DEVICES: '0'
+      CUDA_DEVICE_ORDER: PCI_BUS_ID
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /workspace/llm-perf
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /workspace/llm-perf/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base
+    choices:
+      launcher: process
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- model=TigerResearch/tigerbot-13b-base
audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/cli.log
ADDED
@@ -0,0 +1,19 @@
+[2024-01-13 09:15:00,813][launcher][INFO] - Configuring process launcher
+[2024-01-13 09:15:00,814][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-01-13 09:15:00,815][process][INFO] - + Launched worker process with PID 440606.
+[2024-01-13 09:15:00,815][isolation][INFO] - + Launched device(s) isolation process 440607.
+[2024-01-13 09:15:03,008][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+[2024-01-13 09:15:03,009][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
+[2024-01-13 09:15:03,165][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
+[2024-01-13 09:15:04,972][backend][INFO] - Configuring pytorch backend
+[2024-01-13 09:15:04,973][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2024-01-13 09:15:04,973][pytorch][INFO] - + Disabling gradients
+[2024-01-13 09:15:04,973][pytorch][INFO] - + Processing quantization config
+[2024-01-13 09:15:04,973][pytorch][INFO] - + Processing BitsAndBytes config
+[2024-01-13 09:15:04,973][pytorch][INFO] - + Loading model with no weights
+[2024-01-13 09:15:04,973][pytorch][INFO] - + Creating no weights model directory
+[2024-01-13 09:15:04,973][pytorch][INFO] - + Saving pretrained config
+[2024-01-13 09:15:04,974][pytorch][INFO] - + Creating no weights model
+[2024-01-13 09:15:04,992][pytorch][INFO] - + Saving no weights model
+[2024-01-13 09:15:04,993][pytorch][INFO] - + Loading no weights model
+[2024-01-13 09:15:04,993][pytorch][INFO] - + Loading quantized model
audace/pytorch+cuda+float16+bnb-4bit/TigerResearch/tigerbot-13b-base/hydra_config.yaml
ADDED
@@ -0,0 +1,86 @@
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+backend:
+  name: pytorch
+  version: 2.1.2+cu118
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: true
+  disable_grad: true
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: bnb
+  quantization_config:
+    llm_int8_threshold: 0.0
+    load_in_4bit: true
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: false
+  can_generate: true
+  forward_kwargs: {}
+  generate_kwargs:
+    num_return_sequences: 1
+    max_new_tokens: 256
+    min_new_tokens: 256
+    do_sample: false
+    use_cache: true
+    pad_token_id: 0
+    temperature: 1.0
+    num_beams: 1
+experiment_name: pytorch+cuda+float16+bnb-4bit
+device: cuda
+model: TigerResearch/tigerbot-13b-base
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.26.1
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
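For reference, the resolved quantization block above (quantization_scheme: bnb with load_in_4bit: true and llm_int8_threshold: 0.0) corresponds to settings that can be expressed directly through transformers' BitsAndBytesConfig. The sketch below only illustrates those settings; it is not the benchmark harness itself, which builds a randomly initialized no-weights model rather than downloading a checkpoint:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Quantization settings matching the bnb-4bit run reported above.
quantization_config = BitsAndBytesConfig(load_in_4bit=True, llm_int8_threshold=0.0)

model = AutoModelForCausalLM.from_pretrained(
    "TigerResearch/tigerbot-13b-base",
    torch_dtype=torch.float16,
    quantization_config=quantization_config,
    trust_remote_code=True,
)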
audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/.hydra/config.yaml
ADDED
@@ -0,0 +1,77 @@
+backend:
+  name: pytorch
+  version: ${pytorch_version:}
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  continuous_isolation: true
+  isolation_check_interval: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: ${is_inference:${benchmark.name}}
+  disable_grad: ${is_inference:${benchmark.name}}
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: null
+  quantization_config: {}
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: ${can_diffuse:${task}}
+  can_generate: ${can_generate:${task}}
+  forward_kwargs: {}
+  generate_kwargs: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  start_method: spawn
+experiment_name: pytorch+cuda+float16
+device: cuda
+model: AI-Sweden-Models/gpt-sw3-1.3b
+task: ${infer_task:${model}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.25.0
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
+hydra:
+  run:
+    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=AI-Sweden-Models/gpt-sw3-1.3b
+  job:
+    name: cli
+    chdir: true
+    override_dirname: model=AI-Sweden-Models/gpt-sw3-1.3b
+    id: ???
+    num: ???
+    config_name: pytorch+cuda+float16
+    env_set:
+      COUNTRY_ISO_CODE: FRA
+      OVERRIDE_BENCHMARKS: '0'
+      CUDA_VISIBLE_DEVICES: '0'
+      CUDA_DEVICE_ORDER: PCI_BUS_ID
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /workspace/llm-perf
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /workspace/llm-perf/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b
+    choices:
+      launcher: process
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- model=AI-Sweden-Models/gpt-sw3-1.3b
audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/cli.log
ADDED
@@ -0,0 +1,76 @@
+[2024-01-10 02:39:11,179][launcher][INFO] - Configuring process launcher
+[2024-01-10 02:39:11,179][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-01-10 02:39:11,180][process][INFO] - + Launched experiment in process with PID 73076.
+[2024-01-10 02:39:13,201][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+[2024-01-10 02:39:13,201][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
+[2024-01-10 02:39:13,359][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
+[2024-01-10 02:39:14,738][backend][WARNING] - Could not find the model's generation config
+[2024-01-10 02:39:14,738][backend][INFO] - Configuring pytorch backend
+[2024-01-10 02:39:14,738][backend][INFO] - + Running continuous isolation check
+[2024-01-10 02:39:14,739][backend][INFO] - + Started isolation process with PID 73302
+[2024-01-10 02:39:14,739][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+[2024-01-10 02:39:14,739][pytorch][INFO] - + Disabling gradients
+[2024-01-10 02:39:14,739][pytorch][INFO] - + Loading model with no weights
+[2024-01-10 02:39:14,739][pytorch][INFO] - + Creating no weights model directory
+[2024-01-10 02:39:14,739][pytorch][INFO] - + Saving pretrained config to /tmp/tmp8n0oqyqc/no_weights
+[2024-01-10 02:39:14,740][pytorch][INFO] - + Creating no weights model to /tmp/tmp8n0oqyqc/no_weights
+[2024-01-10 02:39:14,760][pytorch][INFO] - + Saving no weights model to /tmp/tmp8n0oqyqc/no_weights
+[2024-01-10 02:39:14,760][pytorch][INFO] - + Loading no weights model
+[2024-01-10 02:39:14,760][pytorch][INFO] - + Loading model directly on device: cuda
+[2024-01-10 02:39:14,926][pytorch][INFO] - + Randomizing model weights
+[2024-01-10 02:39:14,928][pytorch][INFO] - + Tying model weights after randomization
+[2024-01-10 02:39:14,929][pytorch][INFO] - + Turning on model's eval mode
+[2024-01-10 02:39:14,991][benchmark][INFO] - Configuring inference benchmark
+[2024-01-10 02:39:14,991][inference][INFO] - Running inference benchmark
+[2024-01-10 02:39:14,991][inference][INFO] - + Updating input shapes with model shapes
+[2024-01-10 02:39:14,991][inference][INFO] - + Preparing backend for inference
+[2024-01-10 02:39:14,991][inference][INFO] - + Creating input generator
+[2024-01-10 02:39:14,991][input-generator][INFO] - Using text-generation task generator
+[2024-01-10 02:39:14,991][inference][INFO] - + Preparing input for the forward pass
+[2024-01-10 02:39:14,991][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-10 02:39:14,992][inference][INFO] - + Tracking forward pass peak memory
+[2024-01-10 02:39:14,992][memory][INFO] - Tracking CUDA devices: [0]
+[2024-01-10 02:39:14,992][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-10 02:39:15,220][inference][INFO] - + Forward pass max memory used: 3715 (MB)
+[2024-01-10 02:39:15,220][inference][INFO] - + Forward pass max memory reserved: 2856 (MB)
+[2024-01-10 02:39:15,220][inference][INFO] - + Forward pass max memory allocated: 2831 (MB)
+[2024-01-10 02:39:15,220][inference][INFO] - + Preparing input for the generation pass
+[2024-01-10 02:39:15,220][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-10 02:39:15,220][inference][INFO] - + Tracking generation pass peak memory
+[2024-01-10 02:39:15,220][memory][INFO] - Tracking CUDA devices: [0]
+[2024-01-10 02:39:15,221][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-10 02:39:16,154][isolation][INFO] - Continuously checking only permitted process(es) {73076, 73302} are running on isolated device(s) {0}
+[2024-01-10 02:39:17,090][inference][INFO] - + Generation pass max memory used: 3903 (MB)
+[2024-01-10 02:39:17,090][inference][INFO] - + Generation pass max memory reserved: 3045 (MB)
+[2024-01-10 02:39:17,090][inference][INFO] - + Generation pass max memory allocated: 2999 (MB)
+[2024-01-10 02:39:17,090][inference][INFO] - + Preparing input for the forward pass
+[2024-01-10 02:39:17,090][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-10 02:39:17,090][inference][INFO] - + Warming up the forward pass
+[2024-01-10 02:39:17,177][inference][INFO] - + Tracking forward pass latency and throughput
+[2024-01-10 02:39:27,205][inference][INFO] - + Forward pass latency: 8.82e-03 (s)
+[2024-01-10 02:39:27,205][inference][INFO] - + Forward pass throughput: 113.00 (samples/s)
+[2024-01-10 02:39:27,206][inference][INFO] - + Preparing input for the generation pass
+[2024-01-10 02:39:27,206][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-10 02:39:27,206][inference][INFO] - + Warming up the generation pass
+[2024-01-10 02:39:28,896][inference][INFO] - + Tracking generation latency and throughput
+[2024-01-10 02:39:39,027][inference][INFO] - + Generation pass latency: 1.69e+00 (s)
+[2024-01-10 02:39:39,027][inference][INFO] - + Generation pass throughput: 151.00 (tokens/s)
+[2024-01-10 02:39:39,027][inference][INFO] - + Preparing input for the forward pass
+[2024-01-10 02:39:39,027][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-10 02:39:39,027][inference][INFO] - + Tracking forward pass energy consumption
+[2024-01-10 02:39:53,473][inference][INFO] - + Forward pass energy consumption: 7.84e-07 (kWh/sample)
+[2024-01-10 02:39:53,474][inference][INFO] - + Forward pass carbon emissions: 5.28e-08 (kgCO2eq/sample)
+[2024-01-10 02:39:53,474][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/forward_codecarbon.csv
+[2024-01-10 02:39:53,474][inference][INFO] - + Preparing input for the generation pass
+[2024-01-10 02:39:53,474][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-10 02:39:53,474][inference][INFO] - + Tracking generation pass energy consumption
+[2024-01-10 02:40:09,533][inference][INFO] - + Generation pass energy consumption: 3.69e-07 (kWh/token)
+[2024-01-10 02:40:09,533][inference][INFO] - + Generation pass carbon emissions: 2.48e-08 (kgCO2eq/token)
+[2024-01-10 02:40:09,533][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/generate_codecarbon.csv
+[2024-01-10 02:40:09,533][inference][INFO] - Saving results
+[2024-01-10 02:40:09,535][backend][INFO] - Cleaning pytorch backend
+[2024-01-10 02:40:09,535][backend][INFO] - + Terminating isolation process
+[2024-01-10 02:40:09,552][backend][INFO] - + Deleting pretrained model
+[2024-01-10 02:40:09,718][pytorch][INFO] - + Emptying CUDA cache
+[2024-01-10 02:40:09,822][pytorch][INFO] - + Cleaning temporary directory
+[2024-01-10 02:40:10,361][process][INFO] - + Process exited successfully, closing it.
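The headline numbers in this log (latency, throughput, memory, energy, emissions) follow a regular "+ <stage> pass <metric>: <value> (<unit>)" pattern, so they can be scraped directly when a field is needed from the log rather than from inference_results.csv. A minimal parsing sketch over the file added in this commit:

import re

# Pull the per-stage metrics out of a cli.log produced by the inference benchmark.
pattern = re.compile(r"\+ (Forward|Generation) pass ([\w ]+): ([\d.e+-]+) \(([^)]+)\)")

metrics = {}
with open("audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/cli.log") as f:
    for line in f:
        m = pattern.search(line)
        if m:
            stage, name, value, unit = m.groups()
            metrics[f"{stage.lower()}.{name.strip().replace(' ', '_')}"] = (float(value), unit)

print(metrics.get("generation.throughput"))  # (151.0, 'tokens/s')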
audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
+timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+2024-01-10T02:39:53,codecarbon,f07070fd-42c0-4923-8ea5-f915461a070d,10.036018133163452,6.121168747844664e-05,6.099200566026888e-06,42.5,0.0,0.4853367805480957,0.00011847845878865984,0.0007885197974819846,1.3217834748168874e-06,0.0009083200397454613,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
+timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+2024-01-10T02:40:09,codecarbon,e8ac54f0-ce40-45ee-9dad-429e8e6697a0,11.604247808456421,4.450521381776943e-05,3.8352519312227135e-06,42.5,180.57111186485835,0.4855942726135254,0.00013699179159270395,0.0005218854175080279,1.535520054475607e-06,0.0006604127291552073,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/hydra_config.yaml
ADDED
@@ -0,0 +1,85 @@
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  start_method: spawn
+backend:
+  name: pytorch
+  version: 2.1.2+cu118
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  continuous_isolation: true
+  isolation_check_interval: 1.0
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: true
+  disable_grad: true
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: null
+  quantization_config: {}
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: false
+  can_generate: true
+  forward_kwargs: {}
+  generate_kwargs:
+    num_return_sequences: 1
+    max_new_tokens: 256
+    min_new_tokens: 256
+    do_sample: false
+    use_cache: true
+    pad_token_id: 0
+    temperature: 1.0
+    num_beams: 1
+experiment_name: pytorch+cuda+float16
+device: cuda
+model: AI-Sweden-Models/gpt-sw3-1.3b
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.25.0
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
+forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
+0.00882,113.0,3715,3715,2831,2856,7.84e-07,5.28e-08,1.69,151.0,1.68,152.0,3903,3903,2999,3045,3.69e-07,2.48e-08
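As a quick consistency check, the reported generation throughput is roughly new_tokens divided by the generation latency: with new_tokens: 256 from the config and a 1.69 s generation pass, 256 / 1.69 ≈ 151 tokens/s, which matches generate.throughput(tokens/s). A short pandas sketch of the same check on the file above:

import pandas as pd

results = pd.read_csv("audace/pytorch+cuda+float16/AI-Sweden-Models/gpt-sw3-1.3b/inference_results.csv")
row = results.iloc[0]

# 256 new tokens / generation latency should approximate the reported tokens/s.
estimated = 256 / row["generate.latency(s)"]
print(estimated, row["generate.throughput(tokens/s)"])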
audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/.hydra/config.yaml
ADDED
@@ -0,0 +1,76 @@
+backend:
+  name: pytorch
+  version: ${pytorch_version:}
+  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+  seed: 42
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  delete_cache: false
+  no_weights: true
+  device_map: null
+  torch_dtype: float16
+  eval_mode: ${is_inference:${benchmark.name}}
+  disable_grad: ${is_inference:${benchmark.name}}
+  amp_autocast: false
+  amp_dtype: null
+  torch_compile: false
+  torch_compile_config: {}
+  to_bettertransformer: false
+  use_flash_attention_2: false
+  quantization_scheme: null
+  quantization_config: {}
+  data_parallel: false
+  deepspeed_inference: false
+  deepspeed_inference_config: {}
+  peft_strategy: null
+  peft_config: {}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+  duration: 10
+  warmup_runs: 10
+  memory: true
+  energy: true
+  input_shapes:
+    batch_size: 1
+    sequence_length: 256
+    num_choices: 1
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 256
+  can_diffuse: ${can_diffuse:${task}}
+  can_generate: ${can_generate:${task}}
+  forward_kwargs: {}
+  generate_kwargs: {}
+launcher:
+  name: process
+  _target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
+  device_isolation: true
+  start_method: spawn
+experiment_name: pytorch+cuda+float16
+device: cuda
+model: BEE-spoke-data/Mixtral-GQA-400m-v2
+task: ${infer_task:${model}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+  trust_remote_code: true
+environment:
+  optimum_version: 1.16.1
+  optimum_commit: null
+  transformers_version: 4.36.2
+  transformers_commit: null
+  accelerate_version: 0.26.1
+  accelerate_commit: null
+  diffusers_version: null
+  diffusers_commit: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
+  cpu_count: 32
+  cpu_ram_mb: 134796
+  gpus:
+  - NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
+hydra:
+  run:
+    dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - model=BEE-spoke-data/Mixtral-GQA-400m-v2
+  job:
+    name: cli
+    chdir: true
+    override_dirname: model=BEE-spoke-data/Mixtral-GQA-400m-v2
+    id: ???
+    num: ???
+    config_name: pytorch+cuda+float16
+    env_set:
+      COUNTRY_ISO_CODE: FRA
+      OVERRIDE_BENCHMARKS: '0'
+      CUDA_VISIBLE_DEVICES: '0'
+      CUDA_DEVICE_ORDER: PCI_BUS_ID
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /workspace/llm-perf
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /workspace/llm-perf/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2
+    choices:
+      launcher: process
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
+- model=BEE-spoke-data/Mixtral-GQA-400m-v2
audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/cli.log
ADDED
@@ -0,0 +1,72 @@
+[2024-01-13 09:46:54,246][launcher][INFO] - Configuring process launcher
+[2024-01-13 09:46:54,246][process][INFO] - Setting multiprocessing start method to spawn.
+[2024-01-13 09:46:54,248][process][INFO] - + Launched worker process with PID 505734.
+[2024-01-13 09:46:54,248][isolation][INFO] - + Launched device(s) isolation process 505735.
+[2024-01-13 09:46:56,106][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+[2024-01-13 09:46:56,106][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
+[2024-01-13 09:46:56,245][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
+[2024-01-13 09:46:57,449][backend][INFO] - Configuring pytorch backend
+[2024-01-13 09:46:57,449][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type mixtral
+[2024-01-13 09:46:57,449][pytorch][INFO] - + Disabling gradients
+[2024-01-13 09:46:57,449][pytorch][INFO] - + Loading model with no weights
+[2024-01-13 09:46:57,449][pytorch][INFO] - + Creating no weights model directory
+[2024-01-13 09:46:57,450][pytorch][INFO] - + Saving pretrained config
+[2024-01-13 09:46:57,450][pytorch][INFO] - + Creating no weights model
+[2024-01-13 09:46:57,468][pytorch][INFO] - + Saving no weights model
+[2024-01-13 09:46:57,468][pytorch][INFO] - + Loading no weights model
+[2024-01-13 09:46:57,468][pytorch][INFO] - + Loading model directly on device: cuda
+[2024-01-13 09:46:57,644][pytorch][INFO] - + Randomizing model weights
+[2024-01-13 09:46:57,646][pytorch][INFO] - + Tying model weights after randomization
+[2024-01-13 09:46:57,646][pytorch][INFO] - + Turning on model's eval mode
+[2024-01-13 09:46:57,704][benchmark][INFO] - Configuring inference benchmark
+[2024-01-13 09:46:57,704][inference][INFO] - Running inference benchmark
+[2024-01-13 09:46:57,704][inference][INFO] - + Updating input shapes with model shapes
+[2024-01-13 09:46:57,705][inference][INFO] - + Preparing backend for inference
+[2024-01-13 09:46:57,705][inference][INFO] - + Creating input generator
+[2024-01-13 09:46:57,705][input-generator][INFO] - Using text-generation task generator
+[2024-01-13 09:46:57,705][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 09:46:57,705][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 09:46:57,705][inference][INFO] - + Tracking forward pass peak memory
+[2024-01-13 09:46:57,705][memory][INFO] - Tracking CUDA devices: [0]
+[2024-01-13 09:46:57,705][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-13 09:46:58,032][inference][INFO] - + Forward pass max memory used: 4967 (MB)
+[2024-01-13 09:46:58,032][inference][INFO] - + Forward pass max memory reserved: 4108 (MB)
+[2024-01-13 09:46:58,032][inference][INFO] - + Forward pass max memory allocated: 4080 (MB)
+[2024-01-13 09:46:58,032][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 09:46:58,032][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 09:46:58,032][inference][INFO] - + Tracking generation pass peak memory
+[2024-01-13 09:46:58,032][memory][INFO] - Tracking CUDA devices: [0]
+[2024-01-13 09:46:58,032][memory][INFO] - Tracking Pytorch CUDA devices: [0]
+[2024-01-13 09:46:59,801][inference][INFO] - + Generation pass max memory used: 4971 (MB)
+[2024-01-13 09:46:59,801][inference][INFO] - + Generation pass max memory reserved: 4112 (MB)
+[2024-01-13 09:46:59,801][inference][INFO] - + Generation pass max memory allocated: 4083 (MB)
+[2024-01-13 09:46:59,801][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 09:46:59,801][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 09:46:59,801][inference][INFO] - + Warming up the forward pass
+[2024-01-13 09:46:59,942][inference][INFO] - + Tracking forward pass latency and throughput
+[2024-01-13 09:47:09,967][inference][INFO] - + Forward pass latency: 1.43e-02 (s)
+[2024-01-13 09:47:09,967][inference][INFO] - + Forward pass throughput: 69.90 (samples/s)
+[2024-01-13 09:47:09,967][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 09:47:09,967][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 09:47:09,967][inference][INFO] - + Warming up the generation pass
+[2024-01-13 09:47:11,576][inference][INFO] - + Tracking generation latency and throughput
+[2024-01-13 09:47:22,842][inference][INFO] - + Generation pass latency: 1.61e+00 (s)
+[2024-01-13 09:47:22,842][inference][INFO] - + Generation pass throughput: 159.00 (tokens/s)
+[2024-01-13 09:47:22,842][inference][INFO] - + Preparing input for the forward pass
+[2024-01-13 09:47:22,842][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 09:47:22,842][inference][INFO] - + Tracking forward pass energy consumption
+[2024-01-13 09:47:37,301][inference][INFO] - + Forward pass energy consumption: 8.17e-07 (kWh/sample)
+[2024-01-13 09:47:37,302][inference][INFO] - + Forward pass carbon emissions: 5.51e-08 (kgCO2eq/sample)
+[2024-01-13 09:47:37,302][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/forward_codecarbon.csv
+[2024-01-13 09:47:37,302][inference][INFO] - + Preparing input for the generation pass
+[2024-01-13 09:47:37,302][backend][INFO] - + Moving inputs tensors to device cuda
+[2024-01-13 09:47:37,302][inference][INFO] - + Tracking generation pass energy consumption
+[2024-01-13 09:47:51,822][inference][INFO] - + Generation pass energy consumption: 2.62e-07 (kWh/token)
+[2024-01-13 09:47:51,822][inference][INFO] - + Generation pass carbon emissions: 1.77e-08 (kgCO2eq/token)
+[2024-01-13 09:47:51,823][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/generate_codecarbon.csv
+[2024-01-13 09:47:51,823][inference][INFO] - Saving results
+[2024-01-13 09:47:51,825][backend][INFO] - Cleaning pytorch backend
+[2024-01-13 09:47:51,825][backend][INFO] - + Deleting pretrained model
+[2024-01-13 09:47:51,913][pytorch][INFO] - + Emptying CUDA cache
+[2024-01-13 09:47:51,995][pytorch][INFO] - + Cleaning temporary directory
+[2024-01-13 09:47:52,520][isolation][INFO] - + Closing device(s) isolation process...
audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-01-13T09:47:37,codecarbon,ec0895b8-f8d9-4554-8fce-4936a9d61ae6,10.03585147857666,3.8655154139889615e-05,3.851706476765427e-06,42.5,0.0,0.3769354820251465,0.00011847589181529153,0.0004541011966139319,1.0266233958191153e-06,0.0005736037118250425,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
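The per-sample energy figure reported in cli.log can be roughly cross-checked against this CodeCarbon report. A minimal Python sketch, assuming the per-sample value is simply the total tracked energy divided by the number of forward passes that fit into the tracked window (the exact accounting in optimum_benchmark may differ):

    # Rough cross-check, not the benchmark's own computation.
    duration_s = 10.03585147857666      # duration column above
    energy_kwh = 0.0005736037118250425  # energy_consumed column above
    latency_s = 0.0143                  # forward pass latency from cli.log

    n_forward_passes = duration_s / latency_s    # ~702 passes in the tracked window
    print(energy_kwh / n_forward_passes)         # ~8.2e-07 kWh/sample, close to the logged 8.17e-07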
audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-01-13T09:47:51,codecarbon,b2ecfb96-40fa-4d69-9444-cf636a2aacf3,10.084577083587646,2.71611266938231e-05,2.6933332423059206e-06,42.5,0.0,0.37764215469360357,0.00011905126257075206,0.00028296467081578847,1.0279409838868029e-06,0.0004030438743704273,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/hydra_config.yaml
ADDED
@@ -0,0 +1,84 @@
1 |
+
launcher:
|
2 |
+
name: process
|
3 |
+
_target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
|
4 |
+
device_isolation: true
|
5 |
+
start_method: spawn
|
6 |
+
backend:
|
7 |
+
name: pytorch
|
8 |
+
version: 2.1.2+cu118
|
9 |
+
_target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
|
10 |
+
seed: 42
|
11 |
+
inter_op_num_threads: null
|
12 |
+
intra_op_num_threads: null
|
13 |
+
delete_cache: false
|
14 |
+
no_weights: true
|
15 |
+
device_map: null
|
16 |
+
torch_dtype: float16
|
17 |
+
eval_mode: true
|
18 |
+
disable_grad: true
|
19 |
+
amp_autocast: false
|
20 |
+
amp_dtype: null
|
21 |
+
torch_compile: false
|
22 |
+
torch_compile_config: {}
|
23 |
+
to_bettertransformer: false
|
24 |
+
use_flash_attention_2: false
|
25 |
+
quantization_scheme: null
|
26 |
+
quantization_config: {}
|
27 |
+
data_parallel: false
|
28 |
+
deepspeed_inference: false
|
29 |
+
deepspeed_inference_config: {}
|
30 |
+
peft_strategy: null
|
31 |
+
peft_config: {}
|
32 |
+
benchmark:
|
33 |
+
name: inference
|
34 |
+
_target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
|
35 |
+
duration: 10
|
36 |
+
warmup_runs: 10
|
37 |
+
memory: true
|
38 |
+
energy: true
|
39 |
+
input_shapes:
|
40 |
+
batch_size: 1
|
41 |
+
sequence_length: 256
|
42 |
+
num_choices: 1
|
43 |
+
feature_size: 80
|
44 |
+
nb_max_frames: 3000
|
45 |
+
audio_sequence_length: 16000
|
46 |
+
new_tokens: 256
|
47 |
+
can_diffuse: false
|
48 |
+
can_generate: true
|
49 |
+
forward_kwargs: {}
|
50 |
+
generate_kwargs:
|
51 |
+
num_return_sequences: 1
|
52 |
+
max_new_tokens: 256
|
53 |
+
min_new_tokens: 256
|
54 |
+
do_sample: false
|
55 |
+
use_cache: true
|
56 |
+
pad_token_id: 0
|
57 |
+
temperature: 1.0
|
58 |
+
num_beams: 1
|
59 |
+
experiment_name: pytorch+cuda+float16
|
60 |
+
device: cuda
|
61 |
+
model: BEE-spoke-data/Mixtral-GQA-400m-v2
|
62 |
+
task: text-generation
|
63 |
+
hub_kwargs:
|
64 |
+
revision: main
|
65 |
+
cache_dir: null
|
66 |
+
force_download: false
|
67 |
+
local_files_only: false
|
68 |
+
trust_remote_code: true
|
69 |
+
environment:
|
70 |
+
optimum_version: 1.16.1
|
71 |
+
optimum_commit: null
|
72 |
+
transformers_version: 4.36.2
|
73 |
+
transformers_commit: null
|
74 |
+
accelerate_version: 0.26.1
|
75 |
+
accelerate_commit: null
|
76 |
+
diffusers_version: null
|
77 |
+
diffusers_commit: null
|
78 |
+
python_version: 3.10.12
|
79 |
+
system: Linux
|
80 |
+
cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
|
81 |
+
cpu_count: 32
|
82 |
+
cpu_ram_mb: 134796
|
83 |
+
gpus:
|
84 |
+
- NVIDIA GeForce RTX 4090
|
audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
0.0143,69.9,4967,4967,4080,4108,8.17e-07,5.51e-08,1.61,159.0,1.6,159.0,4971,4971,4083,4112,2.62e-07,1.77e-08
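A minimal sketch of how one of these single-row result files could be read back; the path is the one added in this commit and csv.DictReader is from the Python standard library:

    import csv

    path = "audace/pytorch+cuda+float16/BEE-spoke-data/Mixtral-GQA-400m-v2/inference_results.csv"
    with open(path, newline="") as f:
        row = next(csv.DictReader(f))   # each inference_results.csv holds a single data row

    print(row["forward.latency(s)"], row["forward.throughput(samples/s)"])
    print(row["generate.latency(s)"], row["generate.throughput(tokens/s)"])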
audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/.hydra/config.yaml
ADDED
@@ -0,0 +1,76 @@
1 |
+
backend:
|
2 |
+
name: pytorch
|
3 |
+
version: ${pytorch_version:}
|
4 |
+
_target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
|
5 |
+
seed: 42
|
6 |
+
inter_op_num_threads: null
|
7 |
+
intra_op_num_threads: null
|
8 |
+
delete_cache: false
|
9 |
+
no_weights: true
|
10 |
+
device_map: null
|
11 |
+
torch_dtype: float16
|
12 |
+
eval_mode: ${is_inference:${benchmark.name}}
|
13 |
+
disable_grad: ${is_inference:${benchmark.name}}
|
14 |
+
amp_autocast: false
|
15 |
+
amp_dtype: null
|
16 |
+
torch_compile: false
|
17 |
+
torch_compile_config: {}
|
18 |
+
to_bettertransformer: false
|
19 |
+
use_flash_attention_2: false
|
20 |
+
quantization_scheme: null
|
21 |
+
quantization_config: {}
|
22 |
+
data_parallel: false
|
23 |
+
deepspeed_inference: false
|
24 |
+
deepspeed_inference_config: {}
|
25 |
+
peft_strategy: null
|
26 |
+
peft_config: {}
|
27 |
+
benchmark:
|
28 |
+
name: inference
|
29 |
+
_target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
|
30 |
+
duration: 10
|
31 |
+
warmup_runs: 10
|
32 |
+
memory: true
|
33 |
+
energy: true
|
34 |
+
input_shapes:
|
35 |
+
batch_size: 1
|
36 |
+
sequence_length: 256
|
37 |
+
num_choices: 1
|
38 |
+
feature_size: 80
|
39 |
+
nb_max_frames: 3000
|
40 |
+
audio_sequence_length: 16000
|
41 |
+
new_tokens: 256
|
42 |
+
can_diffuse: ${can_diffuse:${task}}
|
43 |
+
can_generate: ${can_generate:${task}}
|
44 |
+
forward_kwargs: {}
|
45 |
+
generate_kwargs: {}
|
46 |
+
launcher:
|
47 |
+
name: process
|
48 |
+
_target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
|
49 |
+
device_isolation: true
|
50 |
+
start_method: spawn
|
51 |
+
experiment_name: pytorch+cuda+float16
|
52 |
+
device: cuda
|
53 |
+
model: BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI
|
54 |
+
task: ${infer_task:${model}}
|
55 |
+
hub_kwargs:
|
56 |
+
revision: main
|
57 |
+
cache_dir: null
|
58 |
+
force_download: false
|
59 |
+
local_files_only: false
|
60 |
+
trust_remote_code: true
|
61 |
+
environment:
|
62 |
+
optimum_version: 1.16.1
|
63 |
+
optimum_commit: null
|
64 |
+
transformers_version: 4.36.2
|
65 |
+
transformers_commit: null
|
66 |
+
accelerate_version: 0.26.1
|
67 |
+
accelerate_commit: null
|
68 |
+
diffusers_version: null
|
69 |
+
diffusers_commit: null
|
70 |
+
python_version: 3.10.12
|
71 |
+
system: Linux
|
72 |
+
cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
|
73 |
+
cpu_count: 32
|
74 |
+
cpu_ram_mb: 134796
|
75 |
+
gpus:
|
76 |
+
- NVIDIA GeForce RTX 4090
|
audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
|
4 |
+
sweep:
|
5 |
+
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
|
6 |
+
subdir: ${hydra.job.num}
|
7 |
+
launcher:
|
8 |
+
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
|
9 |
+
sweeper:
|
10 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
11 |
+
max_batch_size: null
|
12 |
+
params: null
|
13 |
+
help:
|
14 |
+
app_name: ${hydra.job.name}
|
15 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
16 |
+
|
17 |
+
'
|
18 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
19 |
+
|
20 |
+
Use --hydra-help to view Hydra specific help
|
21 |
+
|
22 |
+
'
|
23 |
+
template: '${hydra.help.header}
|
24 |
+
|
25 |
+
== Configuration groups ==
|
26 |
+
|
27 |
+
Compose your configuration from those groups (group=option)
|
28 |
+
|
29 |
+
|
30 |
+
$APP_CONFIG_GROUPS
|
31 |
+
|
32 |
+
|
33 |
+
== Config ==
|
34 |
+
|
35 |
+
Override anything in the config (foo.bar=value)
|
36 |
+
|
37 |
+
|
38 |
+
$CONFIG
|
39 |
+
|
40 |
+
|
41 |
+
${hydra.help.footer}
|
42 |
+
|
43 |
+
'
|
44 |
+
hydra_help:
|
45 |
+
template: 'Hydra (${hydra.runtime.version})
|
46 |
+
|
47 |
+
See https://hydra.cc for more info.
|
48 |
+
|
49 |
+
|
50 |
+
== Flags ==
|
51 |
+
|
52 |
+
$FLAGS_HELP
|
53 |
+
|
54 |
+
|
55 |
+
== Configuration groups ==
|
56 |
+
|
57 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
58 |
+
to command line)
|
59 |
+
|
60 |
+
|
61 |
+
$HYDRA_CONFIG_GROUPS
|
62 |
+
|
63 |
+
|
64 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
65 |
+
|
66 |
+
'
|
67 |
+
hydra_help: ???
|
68 |
+
hydra_logging:
|
69 |
+
version: 1
|
70 |
+
formatters:
|
71 |
+
colorlog:
|
72 |
+
(): colorlog.ColoredFormatter
|
73 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
|
74 |
+
handlers:
|
75 |
+
console:
|
76 |
+
class: logging.StreamHandler
|
77 |
+
formatter: colorlog
|
78 |
+
stream: ext://sys.stdout
|
79 |
+
root:
|
80 |
+
level: INFO
|
81 |
+
handlers:
|
82 |
+
- console
|
83 |
+
disable_existing_loggers: false
|
84 |
+
job_logging:
|
85 |
+
version: 1
|
86 |
+
formatters:
|
87 |
+
simple:
|
88 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
89 |
+
colorlog:
|
90 |
+
(): colorlog.ColoredFormatter
|
91 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
|
92 |
+
- %(message)s'
|
93 |
+
log_colors:
|
94 |
+
DEBUG: purple
|
95 |
+
INFO: green
|
96 |
+
WARNING: yellow
|
97 |
+
ERROR: red
|
98 |
+
CRITICAL: red
|
99 |
+
handlers:
|
100 |
+
console:
|
101 |
+
class: logging.StreamHandler
|
102 |
+
formatter: colorlog
|
103 |
+
stream: ext://sys.stdout
|
104 |
+
file:
|
105 |
+
class: logging.FileHandler
|
106 |
+
formatter: simple
|
107 |
+
filename: ${hydra.job.name}.log
|
108 |
+
root:
|
109 |
+
level: INFO
|
110 |
+
handlers:
|
111 |
+
- console
|
112 |
+
- file
|
113 |
+
disable_existing_loggers: false
|
114 |
+
env: {}
|
115 |
+
mode: RUN
|
116 |
+
searchpath: []
|
117 |
+
callbacks: {}
|
118 |
+
output_subdir: .hydra
|
119 |
+
overrides:
|
120 |
+
hydra:
|
121 |
+
- hydra.mode=RUN
|
122 |
+
task:
|
123 |
+
- model=BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI
|
124 |
+
job:
|
125 |
+
name: cli
|
126 |
+
chdir: true
|
127 |
+
override_dirname: model=BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI
|
128 |
+
id: ???
|
129 |
+
num: ???
|
130 |
+
config_name: pytorch+cuda+float16
|
131 |
+
env_set:
|
132 |
+
COUNTRY_ISO_CODE: FRA
|
133 |
+
OVERRIDE_BENCHMARKS: '0'
|
134 |
+
CUDA_VISIBLE_DEVICES: '0'
|
135 |
+
CUDA_DEVICE_ORDER: PCI_BUS_ID
|
136 |
+
env_copy: []
|
137 |
+
config:
|
138 |
+
override_dirname:
|
139 |
+
kv_sep: '='
|
140 |
+
item_sep: ','
|
141 |
+
exclude_keys: []
|
142 |
+
runtime:
|
143 |
+
version: 1.3.2
|
144 |
+
version_base: '1.3'
|
145 |
+
cwd: /workspace/llm-perf
|
146 |
+
config_sources:
|
147 |
+
- path: hydra.conf
|
148 |
+
schema: pkg
|
149 |
+
provider: hydra
|
150 |
+
- path: optimum_benchmark
|
151 |
+
schema: pkg
|
152 |
+
provider: main
|
153 |
+
- path: hydra_plugins.hydra_colorlog.conf
|
154 |
+
schema: pkg
|
155 |
+
provider: hydra-colorlog
|
156 |
+
- path: /workspace/llm-perf/configs
|
157 |
+
schema: file
|
158 |
+
provider: command-line
|
159 |
+
- path: ''
|
160 |
+
schema: structured
|
161 |
+
provider: schema
|
162 |
+
output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI
|
163 |
+
choices:
|
164 |
+
launcher: process
|
165 |
+
benchmark: inference
|
166 |
+
backend: pytorch
|
167 |
+
hydra/env: default
|
168 |
+
hydra/callbacks: null
|
169 |
+
hydra/job_logging: colorlog
|
170 |
+
hydra/hydra_logging: colorlog
|
171 |
+
hydra/hydra_help: default
|
172 |
+
hydra/help: default
|
173 |
+
hydra/sweeper: basic
|
174 |
+
hydra/launcher: basic
|
175 |
+
hydra/output: default
|
176 |
+
verbose: false
|
audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- model=BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI
audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/cli.log
ADDED
@@ -0,0 +1,72 @@
1 |
+
[2024-01-13 09:57:33,921][launcher][INFO] - Configuring process launcher
|
2 |
+
[2024-01-13 09:57:33,921][process][INFO] - Setting multiprocessing start method to spawn.
|
3 |
+
[2024-01-13 09:57:33,922][process][INFO] - + Launched worker process with PID 531309.
|
4 |
+
[2024-01-13 09:57:33,923][isolation][INFO] - + Launched device(s) isolation process 531310.
|
5 |
+
[2024-01-13 09:57:36,017][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
|
6 |
+
[2024-01-13 09:57:36,017][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
|
7 |
+
[2024-01-13 09:57:36,161][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
|
8 |
+
[2024-01-13 09:57:37,523][backend][INFO] - Configuring pytorch backend
|
9 |
+
[2024-01-13 09:57:37,523][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
|
10 |
+
[2024-01-13 09:57:37,523][pytorch][INFO] - + Disabling gradients
|
11 |
+
[2024-01-13 09:57:37,523][pytorch][INFO] - + Loading model with no weights
|
12 |
+
[2024-01-13 09:57:37,523][pytorch][INFO] - + Creating no weights model directory
|
13 |
+
[2024-01-13 09:57:37,523][pytorch][INFO] - + Saving pretrained config
|
14 |
+
[2024-01-13 09:57:37,524][pytorch][INFO] - + Creating no weights model
|
15 |
+
[2024-01-13 09:57:37,524][pytorch][INFO] - + Saving no weights model
|
16 |
+
[2024-01-13 09:57:37,525][pytorch][INFO] - + Loading no weights model
|
17 |
+
[2024-01-13 09:57:37,525][pytorch][INFO] - + Loading model directly on device: cuda
|
18 |
+
[2024-01-13 09:57:37,653][pytorch][INFO] - + Randomizing model weights
|
19 |
+
[2024-01-13 09:57:37,653][pytorch][INFO] - + Tying model weights after randomization
|
20 |
+
[2024-01-13 09:57:37,654][pytorch][INFO] - + Turning on model's eval mode
|
21 |
+
[2024-01-13 09:57:37,713][benchmark][INFO] - Configuring inference benchmark
|
22 |
+
[2024-01-13 09:57:37,713][inference][INFO] - Running inference benchmark
|
23 |
+
[2024-01-13 09:57:37,713][inference][INFO] - + Updating input shapes with model shapes
|
24 |
+
[2024-01-13 09:57:37,713][inference][INFO] - + Preparing backend for inference
|
25 |
+
[2024-01-13 09:57:37,713][inference][INFO] - + Creating input generator
|
26 |
+
[2024-01-13 09:57:37,713][input-generator][INFO] - Using text-generation task generator
|
27 |
+
[2024-01-13 09:57:37,714][inference][INFO] - + Preparing input for the forward pass
|
28 |
+
[2024-01-13 09:57:37,714][backend][INFO] - + Moving inputs tensors to device cuda
|
29 |
+
[2024-01-13 09:57:37,714][inference][INFO] - + Tracking forward pass peak memory
|
30 |
+
[2024-01-13 09:57:37,714][memory][INFO] - Tracking CUDA devices: [0]
|
31 |
+
[2024-01-13 09:57:37,714][memory][INFO] - Tracking Pytorch CUDA devices: [0]
|
32 |
+
[2024-01-13 09:57:37,866][inference][INFO] - + Forward pass max memory used: 1399 (MB)
|
33 |
+
[2024-01-13 09:57:37,866][inference][INFO] - + Forward pass max memory reserved: 541 (MB)
|
34 |
+
[2024-01-13 09:57:37,866][inference][INFO] - + Forward pass max memory allocated: 500 (MB)
|
35 |
+
[2024-01-13 09:57:37,866][inference][INFO] - + Preparing input for the generation pass
|
36 |
+
[2024-01-13 09:57:37,866][backend][INFO] - + Moving inputs tensors to device cuda
|
37 |
+
[2024-01-13 09:57:37,867][inference][INFO] - + Tracking generation pass peak memory
|
38 |
+
[2024-01-13 09:57:37,867][memory][INFO] - Tracking CUDA devices: [0]
|
39 |
+
[2024-01-13 09:57:37,867][memory][INFO] - Tracking Pytorch CUDA devices: [0]
|
40 |
+
[2024-01-13 09:57:39,005][inference][INFO] - + Generation pass max memory used: 1408 (MB)
|
41 |
+
[2024-01-13 09:57:39,005][inference][INFO] - + Generation pass max memory reserved: 549 (MB)
|
42 |
+
[2024-01-13 09:57:39,005][inference][INFO] - + Generation pass max memory allocated: 500 (MB)
|
43 |
+
[2024-01-13 09:57:39,006][inference][INFO] - + Preparing input for the forward pass
|
44 |
+
[2024-01-13 09:57:39,006][backend][INFO] - + Moving inputs tensors to device cuda
|
45 |
+
[2024-01-13 09:57:39,006][inference][INFO] - + Warming up the forward pass
|
46 |
+
[2024-01-13 09:57:39,045][inference][INFO] - + Tracking forward pass latency and throughput
|
47 |
+
[2024-01-13 09:57:49,104][inference][INFO] - + Forward pass latency: 3.91e-03 (s)
|
48 |
+
[2024-01-13 09:57:49,105][inference][INFO] - + Forward pass throughput: 256.00 (samples/s)
|
49 |
+
[2024-01-13 09:57:49,105][inference][INFO] - + Preparing input for the generation pass
|
50 |
+
[2024-01-13 09:57:49,105][backend][INFO] - + Moving inputs tensors to device cuda
|
51 |
+
[2024-01-13 09:57:49,106][inference][INFO] - + Warming up the generation pass
|
52 |
+
[2024-01-13 09:57:50,148][inference][INFO] - + Tracking generation latency and throughput
|
53 |
+
[2024-01-13 09:58:00,577][inference][INFO] - + Generation pass latency: 1.04e+00 (s)
|
54 |
+
[2024-01-13 09:58:00,577][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
|
55 |
+
[2024-01-13 09:58:00,577][inference][INFO] - + Preparing input for the forward pass
|
56 |
+
[2024-01-13 09:58:00,577][backend][INFO] - + Moving inputs tensors to device cuda
|
57 |
+
[2024-01-13 09:58:00,578][inference][INFO] - + Tracking forward pass energy consumption
|
58 |
+
[2024-01-13 09:58:15,057][inference][INFO] - + Forward pass energy consumption: 2.01e-07 (kWh/sample)
|
59 |
+
[2024-01-13 09:58:15,057][inference][INFO] - + Forward pass carbon emissions: 1.36e-08 (kgCO2eq/sample)
|
60 |
+
[2024-01-13 09:58:15,057][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/forward_codecarbon.csv
|
61 |
+
[2024-01-13 09:58:15,057][inference][INFO] - + Preparing input for the generation pass
|
62 |
+
[2024-01-13 09:58:15,057][backend][INFO] - + Moving inputs tensors to device cuda
|
63 |
+
[2024-01-13 09:58:15,057][inference][INFO] - + Tracking generation pass energy consumption
|
64 |
+
[2024-01-13 09:58:29,986][inference][INFO] - + Generation pass energy consumption: 1.35e-07 (kWh/token)
|
65 |
+
[2024-01-13 09:58:29,987][inference][INFO] - + Generation pass carbon emissions: 9.13e-09 (kgCO2eq/token)
|
66 |
+
[2024-01-13 09:58:29,987][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/generate_codecarbon.csv
|
67 |
+
[2024-01-13 09:58:29,987][inference][INFO] - Saving results
|
68 |
+
[2024-01-13 09:58:29,991][backend][INFO] - Cleaning pytorch backend
|
69 |
+
[2024-01-13 09:58:29,991][backend][INFO] - + Deleting pretrained model
|
70 |
+
[2024-01-13 09:58:30,078][pytorch][INFO] - + Emptying CUDA cache
|
71 |
+
[2024-01-13 09:58:30,090][pytorch][INFO] - + Cleaning temporary directory
|
72 |
+
[2024-01-13 09:58:30,609][isolation][INFO] - + Closing device(s) isolation process...
|
audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-01-13T09:58:15,codecarbon,37c60079-55d9-44d2-8122-e186a25f741a,10.040909051895142,3.5006568124488985e-05,3.486394303898388e-06,42.5,0.0,0.3632984161376953,0.00011853570060597527,0.0003999472644020674,9.79390304154606e-07,0.0005194623553121974,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-01-13T09:58:29,codecarbon,23e1cdcd-e776-4b35-b002-45d33aafe2ae,10.495614528656006,2.3375966630948674e-05,2.2272127627330113e-06,42.5,76.65699446370796,0.36365318298339844,0.00012390379359324775,0.00022193545532611658,1.0366470733449283e-06,0.0003468758959927092,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/hydra_config.yaml
ADDED
@@ -0,0 +1,84 @@
1 |
+
launcher:
|
2 |
+
name: process
|
3 |
+
_target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
|
4 |
+
device_isolation: true
|
5 |
+
start_method: spawn
|
6 |
+
backend:
|
7 |
+
name: pytorch
|
8 |
+
version: 2.1.2+cu118
|
9 |
+
_target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
|
10 |
+
seed: 42
|
11 |
+
inter_op_num_threads: null
|
12 |
+
intra_op_num_threads: null
|
13 |
+
delete_cache: false
|
14 |
+
no_weights: true
|
15 |
+
device_map: null
|
16 |
+
torch_dtype: float16
|
17 |
+
eval_mode: true
|
18 |
+
disable_grad: true
|
19 |
+
amp_autocast: false
|
20 |
+
amp_dtype: null
|
21 |
+
torch_compile: false
|
22 |
+
torch_compile_config: {}
|
23 |
+
to_bettertransformer: false
|
24 |
+
use_flash_attention_2: false
|
25 |
+
quantization_scheme: null
|
26 |
+
quantization_config: {}
|
27 |
+
data_parallel: false
|
28 |
+
deepspeed_inference: false
|
29 |
+
deepspeed_inference_config: {}
|
30 |
+
peft_strategy: null
|
31 |
+
peft_config: {}
|
32 |
+
benchmark:
|
33 |
+
name: inference
|
34 |
+
_target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
|
35 |
+
duration: 10
|
36 |
+
warmup_runs: 10
|
37 |
+
memory: true
|
38 |
+
energy: true
|
39 |
+
input_shapes:
|
40 |
+
batch_size: 1
|
41 |
+
sequence_length: 256
|
42 |
+
num_choices: 1
|
43 |
+
feature_size: 80
|
44 |
+
nb_max_frames: 3000
|
45 |
+
audio_sequence_length: 16000
|
46 |
+
new_tokens: 256
|
47 |
+
can_diffuse: false
|
48 |
+
can_generate: true
|
49 |
+
forward_kwargs: {}
|
50 |
+
generate_kwargs:
|
51 |
+
num_return_sequences: 1
|
52 |
+
max_new_tokens: 256
|
53 |
+
min_new_tokens: 256
|
54 |
+
do_sample: false
|
55 |
+
use_cache: true
|
56 |
+
pad_token_id: 0
|
57 |
+
temperature: 1.0
|
58 |
+
num_beams: 1
|
59 |
+
experiment_name: pytorch+cuda+float16
|
60 |
+
device: cuda
|
61 |
+
model: BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI
|
62 |
+
task: text-generation
|
63 |
+
hub_kwargs:
|
64 |
+
revision: main
|
65 |
+
cache_dir: null
|
66 |
+
force_download: false
|
67 |
+
local_files_only: false
|
68 |
+
trust_remote_code: true
|
69 |
+
environment:
|
70 |
+
optimum_version: 1.16.1
|
71 |
+
optimum_commit: null
|
72 |
+
transformers_version: 4.36.2
|
73 |
+
transformers_commit: null
|
74 |
+
accelerate_version: 0.26.1
|
75 |
+
accelerate_commit: null
|
76 |
+
diffusers_version: null
|
77 |
+
diffusers_commit: null
|
78 |
+
python_version: 3.10.12
|
79 |
+
system: Linux
|
80 |
+
cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
|
81 |
+
cpu_count: 32
|
82 |
+
cpu_ram_mb: 134796
|
83 |
+
gpus:
|
84 |
+
- NVIDIA GeForce RTX 4090
|
audace/pytorch+cuda+float16/BEE-spoke-data/NanoLlama-GQA-L10-A32_KV8-v13-KI/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
0.00391,256.0,1399,1399,500,541,2.01e-07,1.36e-08,1.04,246.0,1.04,245.0,1408,1408,500,549,1.35e-07,9.13e-09
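The generation throughput column can be sanity-checked against the 256 new tokens configured for these runs (new_tokens / max_new_tokens in the configs above); a small sketch, assuming throughput is simply new tokens divided by generation latency:

    new_tokens = 256           # from the benchmark config
    generate_latency_s = 1.04  # generate.latency(s) in the row above
    print(new_tokens / generate_latency_s)  # ~246 tokens/s, matching generate.throughput(tokens/s)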
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/.hydra/config.yaml
ADDED
@@ -0,0 +1,76 @@
1 |
+
backend:
|
2 |
+
name: pytorch
|
3 |
+
version: ${pytorch_version:}
|
4 |
+
_target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
|
5 |
+
seed: 42
|
6 |
+
inter_op_num_threads: null
|
7 |
+
intra_op_num_threads: null
|
8 |
+
delete_cache: false
|
9 |
+
no_weights: true
|
10 |
+
device_map: null
|
11 |
+
torch_dtype: float16
|
12 |
+
eval_mode: ${is_inference:${benchmark.name}}
|
13 |
+
disable_grad: ${is_inference:${benchmark.name}}
|
14 |
+
amp_autocast: false
|
15 |
+
amp_dtype: null
|
16 |
+
torch_compile: false
|
17 |
+
torch_compile_config: {}
|
18 |
+
to_bettertransformer: false
|
19 |
+
use_flash_attention_2: false
|
20 |
+
quantization_scheme: null
|
21 |
+
quantization_config: {}
|
22 |
+
data_parallel: false
|
23 |
+
deepspeed_inference: false
|
24 |
+
deepspeed_inference_config: {}
|
25 |
+
peft_strategy: null
|
26 |
+
peft_config: {}
|
27 |
+
benchmark:
|
28 |
+
name: inference
|
29 |
+
_target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
|
30 |
+
duration: 10
|
31 |
+
warmup_runs: 10
|
32 |
+
memory: true
|
33 |
+
energy: true
|
34 |
+
input_shapes:
|
35 |
+
batch_size: 1
|
36 |
+
sequence_length: 256
|
37 |
+
num_choices: 1
|
38 |
+
feature_size: 80
|
39 |
+
nb_max_frames: 3000
|
40 |
+
audio_sequence_length: 16000
|
41 |
+
new_tokens: 256
|
42 |
+
can_diffuse: ${can_diffuse:${task}}
|
43 |
+
can_generate: ${can_generate:${task}}
|
44 |
+
forward_kwargs: {}
|
45 |
+
generate_kwargs: {}
|
46 |
+
launcher:
|
47 |
+
name: process
|
48 |
+
_target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
|
49 |
+
device_isolation: true
|
50 |
+
start_method: spawn
|
51 |
+
experiment_name: pytorch+cuda+float16
|
52 |
+
device: cuda
|
53 |
+
model: BEE-spoke-data/smol_llama-101M-GQA
|
54 |
+
task: ${infer_task:${model}}
|
55 |
+
hub_kwargs:
|
56 |
+
revision: main
|
57 |
+
cache_dir: null
|
58 |
+
force_download: false
|
59 |
+
local_files_only: false
|
60 |
+
trust_remote_code: true
|
61 |
+
environment:
|
62 |
+
optimum_version: 1.16.1
|
63 |
+
optimum_commit: null
|
64 |
+
transformers_version: 4.36.2
|
65 |
+
transformers_commit: null
|
66 |
+
accelerate_version: 0.26.1
|
67 |
+
accelerate_commit: null
|
68 |
+
diffusers_version: null
|
69 |
+
diffusers_commit: null
|
70 |
+
python_version: 3.10.12
|
71 |
+
system: Linux
|
72 |
+
cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
|
73 |
+
cpu_count: 32
|
74 |
+
cpu_ram_mb: 134796
|
75 |
+
gpus:
|
76 |
+
- NVIDIA GeForce RTX 4090
|
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
|
4 |
+
sweep:
|
5 |
+
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
|
6 |
+
subdir: ${hydra.job.num}
|
7 |
+
launcher:
|
8 |
+
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
|
9 |
+
sweeper:
|
10 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
11 |
+
max_batch_size: null
|
12 |
+
params: null
|
13 |
+
help:
|
14 |
+
app_name: ${hydra.job.name}
|
15 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
16 |
+
|
17 |
+
'
|
18 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
19 |
+
|
20 |
+
Use --hydra-help to view Hydra specific help
|
21 |
+
|
22 |
+
'
|
23 |
+
template: '${hydra.help.header}
|
24 |
+
|
25 |
+
== Configuration groups ==
|
26 |
+
|
27 |
+
Compose your configuration from those groups (group=option)
|
28 |
+
|
29 |
+
|
30 |
+
$APP_CONFIG_GROUPS
|
31 |
+
|
32 |
+
|
33 |
+
== Config ==
|
34 |
+
|
35 |
+
Override anything in the config (foo.bar=value)
|
36 |
+
|
37 |
+
|
38 |
+
$CONFIG
|
39 |
+
|
40 |
+
|
41 |
+
${hydra.help.footer}
|
42 |
+
|
43 |
+
'
|
44 |
+
hydra_help:
|
45 |
+
template: 'Hydra (${hydra.runtime.version})
|
46 |
+
|
47 |
+
See https://hydra.cc for more info.
|
48 |
+
|
49 |
+
|
50 |
+
== Flags ==
|
51 |
+
|
52 |
+
$FLAGS_HELP
|
53 |
+
|
54 |
+
|
55 |
+
== Configuration groups ==
|
56 |
+
|
57 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
58 |
+
to command line)
|
59 |
+
|
60 |
+
|
61 |
+
$HYDRA_CONFIG_GROUPS
|
62 |
+
|
63 |
+
|
64 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
65 |
+
|
66 |
+
'
|
67 |
+
hydra_help: ???
|
68 |
+
hydra_logging:
|
69 |
+
version: 1
|
70 |
+
formatters:
|
71 |
+
colorlog:
|
72 |
+
(): colorlog.ColoredFormatter
|
73 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
|
74 |
+
handlers:
|
75 |
+
console:
|
76 |
+
class: logging.StreamHandler
|
77 |
+
formatter: colorlog
|
78 |
+
stream: ext://sys.stdout
|
79 |
+
root:
|
80 |
+
level: INFO
|
81 |
+
handlers:
|
82 |
+
- console
|
83 |
+
disable_existing_loggers: false
|
84 |
+
job_logging:
|
85 |
+
version: 1
|
86 |
+
formatters:
|
87 |
+
simple:
|
88 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
89 |
+
colorlog:
|
90 |
+
(): colorlog.ColoredFormatter
|
91 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
|
92 |
+
- %(message)s'
|
93 |
+
log_colors:
|
94 |
+
DEBUG: purple
|
95 |
+
INFO: green
|
96 |
+
WARNING: yellow
|
97 |
+
ERROR: red
|
98 |
+
CRITICAL: red
|
99 |
+
handlers:
|
100 |
+
console:
|
101 |
+
class: logging.StreamHandler
|
102 |
+
formatter: colorlog
|
103 |
+
stream: ext://sys.stdout
|
104 |
+
file:
|
105 |
+
class: logging.FileHandler
|
106 |
+
formatter: simple
|
107 |
+
filename: ${hydra.job.name}.log
|
108 |
+
root:
|
109 |
+
level: INFO
|
110 |
+
handlers:
|
111 |
+
- console
|
112 |
+
- file
|
113 |
+
disable_existing_loggers: false
|
114 |
+
env: {}
|
115 |
+
mode: RUN
|
116 |
+
searchpath: []
|
117 |
+
callbacks: {}
|
118 |
+
output_subdir: .hydra
|
119 |
+
overrides:
|
120 |
+
hydra:
|
121 |
+
- hydra.mode=RUN
|
122 |
+
task:
|
123 |
+
- model=BEE-spoke-data/smol_llama-101M-GQA
|
124 |
+
job:
|
125 |
+
name: cli
|
126 |
+
chdir: true
|
127 |
+
override_dirname: model=BEE-spoke-data/smol_llama-101M-GQA
|
128 |
+
id: ???
|
129 |
+
num: ???
|
130 |
+
config_name: pytorch+cuda+float16
|
131 |
+
env_set:
|
132 |
+
COUNTRY_ISO_CODE: FRA
|
133 |
+
OVERRIDE_BENCHMARKS: '0'
|
134 |
+
CUDA_VISIBLE_DEVICES: '0'
|
135 |
+
CUDA_DEVICE_ORDER: PCI_BUS_ID
|
136 |
+
env_copy: []
|
137 |
+
config:
|
138 |
+
override_dirname:
|
139 |
+
kv_sep: '='
|
140 |
+
item_sep: ','
|
141 |
+
exclude_keys: []
|
142 |
+
runtime:
|
143 |
+
version: 1.3.2
|
144 |
+
version_base: '1.3'
|
145 |
+
cwd: /workspace/llm-perf
|
146 |
+
config_sources:
|
147 |
+
- path: hydra.conf
|
148 |
+
schema: pkg
|
149 |
+
provider: hydra
|
150 |
+
- path: optimum_benchmark
|
151 |
+
schema: pkg
|
152 |
+
provider: main
|
153 |
+
- path: hydra_plugins.hydra_colorlog.conf
|
154 |
+
schema: pkg
|
155 |
+
provider: hydra-colorlog
|
156 |
+
- path: /workspace/llm-perf/configs
|
157 |
+
schema: file
|
158 |
+
provider: command-line
|
159 |
+
- path: ''
|
160 |
+
schema: structured
|
161 |
+
provider: schema
|
162 |
+
output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA
|
163 |
+
choices:
|
164 |
+
launcher: process
|
165 |
+
benchmark: inference
|
166 |
+
backend: pytorch
|
167 |
+
hydra/env: default
|
168 |
+
hydra/callbacks: null
|
169 |
+
hydra/job_logging: colorlog
|
170 |
+
hydra/hydra_logging: colorlog
|
171 |
+
hydra/hydra_help: default
|
172 |
+
hydra/help: default
|
173 |
+
hydra/sweeper: basic
|
174 |
+
hydra/launcher: basic
|
175 |
+
hydra/output: default
|
176 |
+
verbose: false
|
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- model=BEE-spoke-data/smol_llama-101M-GQA
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/cli.log
ADDED
@@ -0,0 +1,72 @@
1 |
+
[2024-01-13 09:52:40,736][launcher][INFO] - Configuring process launcher
|
2 |
+
[2024-01-13 09:52:40,737][process][INFO] - Setting multiprocessing start method to spawn.
|
3 |
+
[2024-01-13 09:52:40,738][process][INFO] - + Launched worker process with PID 519384.
|
4 |
+
[2024-01-13 09:52:40,738][isolation][INFO] - + Launched device(s) isolation process 519385.
|
5 |
+
[2024-01-13 09:52:42,734][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
|
6 |
+
[2024-01-13 09:52:42,734][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
|
7 |
+
[2024-01-13 09:52:42,872][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
|
8 |
+
[2024-01-13 09:52:44,056][backend][INFO] - Configuring pytorch backend
|
9 |
+
[2024-01-13 09:52:44,056][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
|
10 |
+
[2024-01-13 09:52:44,056][pytorch][INFO] - + Disabling gradients
|
11 |
+
[2024-01-13 09:52:44,057][pytorch][INFO] - + Loading model with no weights
|
12 |
+
[2024-01-13 09:52:44,057][pytorch][INFO] - + Creating no weights model directory
|
13 |
+
[2024-01-13 09:52:44,057][pytorch][INFO] - + Saving pretrained config
|
14 |
+
[2024-01-13 09:52:44,057][pytorch][INFO] - + Creating no weights model
|
15 |
+
[2024-01-13 09:52:44,058][pytorch][INFO] - + Saving no weights model
|
16 |
+
[2024-01-13 09:52:44,058][pytorch][INFO] - + Loading no weights model
|
17 |
+
[2024-01-13 09:52:44,058][pytorch][INFO] - + Loading model directly on device: cuda
|
18 |
+
[2024-01-13 09:52:44,184][pytorch][INFO] - + Randomizing model weights
|
19 |
+
[2024-01-13 09:52:44,184][pytorch][INFO] - + Tying model weights after randomization
|
20 |
+
[2024-01-13 09:52:44,185][pytorch][INFO] - + Turning on model's eval mode
|
21 |
+
[2024-01-13 09:52:44,244][benchmark][INFO] - Configuring inference benchmark
|
22 |
+
[2024-01-13 09:52:44,244][inference][INFO] - Running inference benchmark
|
23 |
+
[2024-01-13 09:52:44,244][inference][INFO] - + Updating input shapes with model shapes
|
24 |
+
[2024-01-13 09:52:44,244][inference][INFO] - + Preparing backend for inference
|
25 |
+
[2024-01-13 09:52:44,244][inference][INFO] - + Creating input generator
|
26 |
+
[2024-01-13 09:52:44,244][input-generator][INFO] - Using text-generation task generator
|
27 |
+
[2024-01-13 09:52:44,245][inference][INFO] - + Preparing input for the forward pass
|
28 |
+
[2024-01-13 09:52:44,245][backend][INFO] - + Moving inputs tensors to device cuda
|
29 |
+
[2024-01-13 09:52:44,245][inference][INFO] - + Tracking forward pass peak memory
|
30 |
+
[2024-01-13 09:52:44,245][memory][INFO] - Tracking CUDA devices: [0]
|
31 |
+
[2024-01-13 09:52:44,245][memory][INFO] - Tracking Pytorch CUDA devices: [0]
|
32 |
+
[2024-01-13 09:52:44,395][inference][INFO] - + Forward pass max memory used: 1148 (MB)
|
33 |
+
[2024-01-13 09:52:44,395][inference][INFO] - + Forward pass max memory reserved: 289 (MB)
|
34 |
+
[2024-01-13 09:52:44,396][inference][INFO] - + Forward pass max memory allocated: 269 (MB)
|
35 |
+
[2024-01-13 09:52:44,396][inference][INFO] - + Preparing input for the generation pass
|
36 |
+
[2024-01-13 09:52:44,396][backend][INFO] - + Moving inputs tensors to device cuda
|
37 |
+
[2024-01-13 09:52:44,396][inference][INFO] - + Tracking generation pass peak memory
|
38 |
+
[2024-01-13 09:52:44,396][memory][INFO] - Tracking CUDA devices: [0]
|
39 |
+
[2024-01-13 09:52:44,396][memory][INFO] - Tracking Pytorch CUDA devices: [0]
|
40 |
+
[2024-01-13 09:52:45,179][inference][INFO] - + Generation pass max memory used: 1152 (MB)
|
41 |
+
[2024-01-13 09:52:45,179][inference][INFO] - + Generation pass max memory reserved: 293 (MB)
|
42 |
+
[2024-01-13 09:52:45,179][inference][INFO] - + Generation pass max memory allocated: 269 (MB)
|
43 |
+
[2024-01-13 09:52:45,180][inference][INFO] - + Preparing input for the forward pass
|
44 |
+
[2024-01-13 09:52:45,180][backend][INFO] - + Moving inputs tensors to device cuda
|
45 |
+
[2024-01-13 09:52:45,180][inference][INFO] - + Warming up the forward pass
|
46 |
+
[2024-01-13 09:52:45,202][inference][INFO] - + Tracking forward pass latency and throughput
|
47 |
+
[2024-01-13 09:52:55,318][inference][INFO] - + Forward pass latency: 2.32e-03 (s)
|
48 |
+
[2024-01-13 09:52:55,320][inference][INFO] - + Forward pass throughput: 431.00 (samples/s)
|
49 |
+
[2024-01-13 09:52:55,320][inference][INFO] - + Preparing input for the generation pass
|
50 |
+
[2024-01-13 09:52:55,320][backend][INFO] - + Moving inputs tensors to device cuda
|
51 |
+
[2024-01-13 09:52:55,320][inference][INFO] - + Warming up the generation pass
|
52 |
+
[2024-01-13 09:52:55,956][inference][INFO] - + Tracking generation latency and throughput
|
53 |
+
[2024-01-13 09:53:06,162][inference][INFO] - + Generation pass latency: 6.38e-01 (s)
|
54 |
+
[2024-01-13 09:53:06,163][inference][INFO] - + Generation pass throughput: 401.00 (tokens/s)
|
55 |
+
[2024-01-13 09:53:06,163][inference][INFO] - + Preparing input for the forward pass
|
56 |
+
[2024-01-13 09:53:06,163][backend][INFO] - + Moving inputs tensors to device cuda
|
57 |
+
[2024-01-13 09:53:06,163][inference][INFO] - + Tracking forward pass energy consumption
|
58 |
+
[2024-01-13 09:53:20,645][inference][INFO] - + Forward pass energy consumption: 1.12e-07 (kWh/sample)
|
59 |
+
[2024-01-13 09:53:20,646][inference][INFO] - + Forward pass carbon emissions: 7.52e-09 (kgCO2eq/sample)
|
60 |
+
[2024-01-13 09:53:20,646][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/forward_codecarbon.csv
|
61 |
+
[2024-01-13 09:53:20,646][inference][INFO] - + Preparing input for the generation pass
|
62 |
+
[2024-01-13 09:53:20,646][backend][INFO] - + Moving inputs tensors to device cuda
|
63 |
+
[2024-01-13 09:53:20,646][inference][INFO] - + Tracking generation pass energy consumption
|
64 |
+
[2024-01-13 09:53:35,169][inference][INFO] - + Generation pass energy consumption: 7.81e-08 (kWh/token)
|
65 |
+
[2024-01-13 09:53:35,169][inference][INFO] - + Generation pass carbon emissions: 5.26e-09 (kgCO2eq/token)
|
66 |
+
[2024-01-13 09:53:35,169][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/generate_codecarbon.csv
|
67 |
+
[2024-01-13 09:53:35,169][inference][INFO] - Saving results
|
68 |
+
[2024-01-13 09:53:35,176][backend][INFO] - Cleaning pytorch backend
|
69 |
+
[2024-01-13 09:53:35,176][backend][INFO] - + Deleting pretrained model
|
70 |
+
[2024-01-13 09:53:35,265][pytorch][INFO] - + Emptying CUDA cache
|
71 |
+
[2024-01-13 09:53:35,272][pytorch][INFO] - + Cleaning temporary directory
|
72 |
+
[2024-01-13 09:53:35,808][isolation][INFO] - + Closing device(s) isolation process...
|
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-01-13T09:53:20,codecarbon,e731a8b9-d0f2-47e5-b0ad-850796786950,10.037946939468384,3.16057820516264e-05,3.1486301175148733e-06,42.5,0.0,0.35999536514282227,0.00011850104083617528,0.00034951833516982944,9.787253684780238e-07,0.0004689981013744828,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-01-13T09:53:35,codecarbon,ac524719-6154-4313-9713-59103e2466cf,10.130835056304932,2.0217237487941266e-05,1.995614120215989e-06,42.5,56.63061379693696,0.3599996566772461,0.00011959752688805264,0.0001794204213139139,9.855758808537492e-07,0.00030000352408282035,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/hydra_config.yaml
ADDED
@@ -0,0 +1,84 @@
1 |
+
launcher:
|
2 |
+
name: process
|
3 |
+
_target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
|
4 |
+
device_isolation: true
|
5 |
+
start_method: spawn
|
6 |
+
backend:
|
7 |
+
name: pytorch
|
8 |
+
version: 2.1.2+cu118
|
9 |
+
_target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
|
10 |
+
seed: 42
|
11 |
+
inter_op_num_threads: null
|
12 |
+
intra_op_num_threads: null
|
13 |
+
delete_cache: false
|
14 |
+
no_weights: true
|
15 |
+
device_map: null
|
16 |
+
torch_dtype: float16
|
17 |
+
eval_mode: true
|
18 |
+
disable_grad: true
|
19 |
+
amp_autocast: false
|
20 |
+
amp_dtype: null
|
21 |
+
torch_compile: false
|
22 |
+
torch_compile_config: {}
|
23 |
+
to_bettertransformer: false
|
24 |
+
use_flash_attention_2: false
|
25 |
+
quantization_scheme: null
|
26 |
+
quantization_config: {}
|
27 |
+
data_parallel: false
|
28 |
+
deepspeed_inference: false
|
29 |
+
deepspeed_inference_config: {}
|
30 |
+
peft_strategy: null
|
31 |
+
peft_config: {}
|
32 |
+
benchmark:
|
33 |
+
name: inference
|
34 |
+
_target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
|
35 |
+
duration: 10
|
36 |
+
warmup_runs: 10
|
37 |
+
memory: true
|
38 |
+
energy: true
|
39 |
+
input_shapes:
|
40 |
+
batch_size: 1
|
41 |
+
sequence_length: 256
|
42 |
+
num_choices: 1
|
43 |
+
feature_size: 80
|
44 |
+
nb_max_frames: 3000
|
45 |
+
audio_sequence_length: 16000
|
46 |
+
new_tokens: 256
|
47 |
+
can_diffuse: false
|
48 |
+
can_generate: true
|
49 |
+
forward_kwargs: {}
|
50 |
+
generate_kwargs:
|
51 |
+
num_return_sequences: 1
|
52 |
+
max_new_tokens: 256
|
53 |
+
min_new_tokens: 256
|
54 |
+
do_sample: false
|
55 |
+
use_cache: true
|
56 |
+
pad_token_id: 0
|
57 |
+
temperature: 1.0
|
58 |
+
num_beams: 1
|
59 |
+
experiment_name: pytorch+cuda+float16
|
60 |
+
device: cuda
|
61 |
+
model: BEE-spoke-data/smol_llama-101M-GQA
|
62 |
+
task: text-generation
|
63 |
+
hub_kwargs:
|
64 |
+
revision: main
|
65 |
+
cache_dir: null
|
66 |
+
force_download: false
|
67 |
+
local_files_only: false
|
68 |
+
trust_remote_code: true
|
69 |
+
environment:
|
70 |
+
optimum_version: 1.16.1
|
71 |
+
optimum_commit: null
|
72 |
+
transformers_version: 4.36.2
|
73 |
+
transformers_commit: null
|
74 |
+
accelerate_version: 0.26.1
|
75 |
+
accelerate_commit: null
|
76 |
+
diffusers_version: null
|
77 |
+
diffusers_commit: null
|
78 |
+
python_version: 3.10.12
|
79 |
+
system: Linux
|
80 |
+
cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
|
81 |
+
cpu_count: 32
|
82 |
+
cpu_ram_mb: 134796
|
83 |
+
gpus:
|
84 |
+
- NVIDIA GeForce RTX 4090
|
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-101M-GQA/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
0.00232,431.0,1148,1148,269,289,1.12e-07,7.52e-09,0.638,401.0,0.636,401.0,1152,1152,269,293,7.81e-08,5.26e-09
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/.hydra/config.yaml
ADDED
@@ -0,0 +1,76 @@
1 |
+
backend:
|
2 |
+
name: pytorch
|
3 |
+
version: ${pytorch_version:}
|
4 |
+
_target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
|
5 |
+
seed: 42
|
6 |
+
inter_op_num_threads: null
|
7 |
+
intra_op_num_threads: null
|
8 |
+
delete_cache: false
|
9 |
+
no_weights: true
|
10 |
+
device_map: null
|
11 |
+
torch_dtype: float16
|
12 |
+
eval_mode: ${is_inference:${benchmark.name}}
|
13 |
+
disable_grad: ${is_inference:${benchmark.name}}
|
14 |
+
amp_autocast: false
|
15 |
+
amp_dtype: null
|
16 |
+
torch_compile: false
|
17 |
+
torch_compile_config: {}
|
18 |
+
to_bettertransformer: false
|
19 |
+
use_flash_attention_2: false
|
20 |
+
quantization_scheme: null
|
21 |
+
quantization_config: {}
|
22 |
+
data_parallel: false
|
23 |
+
deepspeed_inference: false
|
24 |
+
deepspeed_inference_config: {}
|
25 |
+
peft_strategy: null
|
26 |
+
peft_config: {}
|
27 |
+
benchmark:
|
28 |
+
name: inference
|
29 |
+
_target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
|
30 |
+
duration: 10
|
31 |
+
warmup_runs: 10
|
32 |
+
memory: true
|
33 |
+
energy: true
|
34 |
+
input_shapes:
|
35 |
+
batch_size: 1
|
36 |
+
sequence_length: 256
|
37 |
+
num_choices: 1
|
38 |
+
feature_size: 80
|
39 |
+
nb_max_frames: 3000
|
40 |
+
audio_sequence_length: 16000
|
41 |
+
new_tokens: 256
|
42 |
+
can_diffuse: ${can_diffuse:${task}}
|
43 |
+
can_generate: ${can_generate:${task}}
|
44 |
+
forward_kwargs: {}
|
45 |
+
generate_kwargs: {}
|
46 |
+
launcher:
|
47 |
+
name: process
|
48 |
+
_target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
|
49 |
+
device_isolation: true
|
50 |
+
start_method: spawn
|
51 |
+
experiment_name: pytorch+cuda+float16
|
52 |
+
device: cuda
|
53 |
+
model: BEE-spoke-data/smol_llama-220M-GQA
|
54 |
+
task: ${infer_task:${model}}
|
55 |
+
hub_kwargs:
|
56 |
+
revision: main
|
57 |
+
cache_dir: null
|
58 |
+
force_download: false
|
59 |
+
local_files_only: false
|
60 |
+
trust_remote_code: true
|
61 |
+
environment:
|
62 |
+
optimum_version: 1.16.1
|
63 |
+
optimum_commit: null
|
64 |
+
transformers_version: 4.36.2
|
65 |
+
transformers_commit: null
|
66 |
+
accelerate_version: 0.26.1
|
67 |
+
accelerate_commit: null
|
68 |
+
diffusers_version: null
|
69 |
+
diffusers_commit: null
|
70 |
+
python_version: 3.10.12
|
71 |
+
system: Linux
|
72 |
+
cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
|
73 |
+
cpu_count: 32
|
74 |
+
cpu_ram_mb: 134796
|
75 |
+
gpus:
|
76 |
+
- NVIDIA GeForce RTX 4090
|
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
1 |
+
hydra:
|
2 |
+
run:
|
3 |
+
dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
|
4 |
+
sweep:
|
5 |
+
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
|
6 |
+
subdir: ${hydra.job.num}
|
7 |
+
launcher:
|
8 |
+
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
|
9 |
+
sweeper:
|
10 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
11 |
+
max_batch_size: null
|
12 |
+
params: null
|
13 |
+
help:
|
14 |
+
app_name: ${hydra.job.name}
|
15 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
16 |
+
|
17 |
+
'
|
18 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
19 |
+
|
20 |
+
Use --hydra-help to view Hydra specific help
|
21 |
+
|
22 |
+
'
|
23 |
+
template: '${hydra.help.header}
|
24 |
+
|
25 |
+
== Configuration groups ==
|
26 |
+
|
27 |
+
Compose your configuration from those groups (group=option)
|
28 |
+
|
29 |
+
|
30 |
+
$APP_CONFIG_GROUPS
|
31 |
+
|
32 |
+
|
33 |
+
== Config ==
|
34 |
+
|
35 |
+
Override anything in the config (foo.bar=value)
|
36 |
+
|
37 |
+
|
38 |
+
$CONFIG
|
39 |
+
|
40 |
+
|
41 |
+
${hydra.help.footer}
|
42 |
+
|
43 |
+
'
|
44 |
+
hydra_help:
|
45 |
+
template: 'Hydra (${hydra.runtime.version})
|
46 |
+
|
47 |
+
See https://hydra.cc for more info.
|
48 |
+
|
49 |
+
|
50 |
+
== Flags ==
|
51 |
+
|
52 |
+
$FLAGS_HELP
|
53 |
+
|
54 |
+
|
55 |
+
== Configuration groups ==
|
56 |
+
|
57 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
58 |
+
to command line)
|
59 |
+
|
60 |
+
|
61 |
+
$HYDRA_CONFIG_GROUPS
|
62 |
+
|
63 |
+
|
64 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
65 |
+
|
66 |
+
'
|
67 |
+
hydra_help: ???
|
68 |
+
hydra_logging:
|
69 |
+
version: 1
|
70 |
+
formatters:
|
71 |
+
colorlog:
|
72 |
+
(): colorlog.ColoredFormatter
|
73 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
|
74 |
+
handlers:
|
75 |
+
console:
|
76 |
+
class: logging.StreamHandler
|
77 |
+
formatter: colorlog
|
78 |
+
stream: ext://sys.stdout
|
79 |
+
root:
|
80 |
+
level: INFO
|
81 |
+
handlers:
|
82 |
+
- console
|
83 |
+
disable_existing_loggers: false
|
84 |
+
job_logging:
|
85 |
+
version: 1
|
86 |
+
formatters:
|
87 |
+
simple:
|
88 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
89 |
+
colorlog:
|
90 |
+
(): colorlog.ColoredFormatter
|
91 |
+
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
|
92 |
+
- %(message)s'
|
93 |
+
log_colors:
|
94 |
+
DEBUG: purple
|
95 |
+
INFO: green
|
96 |
+
WARNING: yellow
|
97 |
+
ERROR: red
|
98 |
+
CRITICAL: red
|
99 |
+
handlers:
|
100 |
+
console:
|
101 |
+
class: logging.StreamHandler
|
102 |
+
formatter: colorlog
|
103 |
+
stream: ext://sys.stdout
|
104 |
+
file:
|
105 |
+
class: logging.FileHandler
|
106 |
+
formatter: simple
|
107 |
+
filename: ${hydra.job.name}.log
|
108 |
+
root:
|
109 |
+
level: INFO
|
110 |
+
handlers:
|
111 |
+
- console
|
112 |
+
- file
|
113 |
+
disable_existing_loggers: false
|
114 |
+
env: {}
|
115 |
+
mode: RUN
|
116 |
+
searchpath: []
|
117 |
+
callbacks: {}
|
118 |
+
output_subdir: .hydra
|
119 |
+
overrides:
|
120 |
+
hydra:
|
121 |
+
- hydra.mode=RUN
|
122 |
+
task:
|
123 |
+
- model=BEE-spoke-data/smol_llama-220M-GQA
|
124 |
+
job:
|
125 |
+
name: cli
|
126 |
+
chdir: true
|
127 |
+
override_dirname: model=BEE-spoke-data/smol_llama-220M-GQA
|
128 |
+
id: ???
|
129 |
+
num: ???
|
130 |
+
config_name: pytorch+cuda+float16
|
131 |
+
env_set:
|
132 |
+
COUNTRY_ISO_CODE: FRA
|
133 |
+
OVERRIDE_BENCHMARKS: '0'
|
134 |
+
CUDA_VISIBLE_DEVICES: '0'
|
135 |
+
CUDA_DEVICE_ORDER: PCI_BUS_ID
|
136 |
+
env_copy: []
|
137 |
+
config:
|
138 |
+
override_dirname:
|
139 |
+
kv_sep: '='
|
140 |
+
item_sep: ','
|
141 |
+
exclude_keys: []
|
142 |
+
runtime:
|
143 |
+
version: 1.3.2
|
144 |
+
version_base: '1.3'
|
145 |
+
cwd: /workspace/llm-perf
|
146 |
+
config_sources:
|
147 |
+
- path: hydra.conf
|
148 |
+
schema: pkg
|
149 |
+
provider: hydra
|
150 |
+
- path: optimum_benchmark
|
151 |
+
schema: pkg
|
152 |
+
provider: main
|
153 |
+
- path: hydra_plugins.hydra_colorlog.conf
|
154 |
+
schema: pkg
|
155 |
+
provider: hydra-colorlog
|
156 |
+
- path: /workspace/llm-perf/configs
|
157 |
+
schema: file
|
158 |
+
provider: command-line
|
159 |
+
- path: ''
|
160 |
+
schema: structured
|
161 |
+
provider: schema
|
162 |
+
output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA
|
163 |
+
choices:
|
164 |
+
launcher: process
|
165 |
+
benchmark: inference
|
166 |
+
backend: pytorch
|
167 |
+
hydra/env: default
|
168 |
+
hydra/callbacks: null
|
169 |
+
hydra/job_logging: colorlog
|
170 |
+
hydra/hydra_logging: colorlog
|
171 |
+
hydra/hydra_help: default
|
172 |
+
hydra/help: default
|
173 |
+
hydra/sweeper: basic
|
174 |
+
hydra/launcher: basic
|
175 |
+
hydra/output: default
|
176 |
+
verbose: false
|
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- model=BEE-spoke-data/smol_llama-220M-GQA
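Note: the per-model output directory seen throughout this commit follows from the hydra.run.dir pattern above combined with the model override in this overrides.yaml. A minimal interpolation sketch, assuming OmegaConf is available; the top-level key run_dir and the explicit HOSTNAME value are stand-ins for illustration only (the real key is hydra.run.dir):

import os
from omegaconf import OmegaConf

os.environ["HOSTNAME"] = "audace"  # illustration: the benchmark machine's hostname
cfg = OmegaConf.create({
    "experiment_name": "pytorch+cuda+float16",
    "model": "BEE-spoke-data/smol_llama-220M-GQA",
    "run_dir": "dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}",
})
# Accessing the key resolves the interpolations, including the oc.env lookup.
print(cfg.run_dir)
# dataset/audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA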
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/cli.log
ADDED
@@ -0,0 +1,72 @@
[2024-01-13 10:02:36,957][launcher][INFO] - Configuring process launcher
[2024-01-13 10:02:36,958][process][INFO] - Setting multiprocessing start method to spawn.
[2024-01-13 10:02:36,959][process][INFO] - + Launched worker process with PID 544789.
[2024-01-13 10:02:36,959][isolation][INFO] - + Launched device(s) isolation process 544790.
[2024-01-13 10:02:38,851][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
[2024-01-13 10:02:38,851][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
[2024-01-13 10:02:38,988][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
[2024-01-13 10:02:40,362][backend][INFO] - Configuring pytorch backend
[2024-01-13 10:02:40,363][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
[2024-01-13 10:02:40,363][pytorch][INFO] - + Disabling gradients
[2024-01-13 10:02:40,363][pytorch][INFO] - + Loading model with no weights
[2024-01-13 10:02:40,363][pytorch][INFO] - + Creating no weights model directory
[2024-01-13 10:02:40,363][pytorch][INFO] - + Saving pretrained config
[2024-01-13 10:02:40,364][pytorch][INFO] - + Creating no weights model
[2024-01-13 10:02:40,364][pytorch][INFO] - + Saving no weights model
[2024-01-13 10:02:40,365][pytorch][INFO] - + Loading no weights model
[2024-01-13 10:02:40,365][pytorch][INFO] - + Loading model directly on device: cuda
[2024-01-13 10:02:40,498][pytorch][INFO] - + Randomizing model weights
[2024-01-13 10:02:40,499][pytorch][INFO] - + Tying model weights after randomization
[2024-01-13 10:02:40,500][pytorch][INFO] - + Turning on model's eval mode
[2024-01-13 10:02:40,559][benchmark][INFO] - Configuring inference benchmark
[2024-01-13 10:02:40,559][inference][INFO] - Running inference benchmark
[2024-01-13 10:02:40,559][inference][INFO] - + Updating input shapes with model shapes
[2024-01-13 10:02:40,559][inference][INFO] - + Preparing backend for inference
[2024-01-13 10:02:40,559][inference][INFO] - + Creating input generator
[2024-01-13 10:02:40,559][input-generator][INFO] - Using text-generation task generator
[2024-01-13 10:02:40,560][inference][INFO] - + Preparing input for the forward pass
[2024-01-13 10:02:40,560][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 10:02:40,560][inference][INFO] - + Tracking forward pass peak memory
[2024-01-13 10:02:40,560][memory][INFO] - Tracking CUDA devices: [0]
[2024-01-13 10:02:40,560][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-01-13 10:02:40,804][inference][INFO] - + Forward pass max memory used: 1397 (MB)
[2024-01-13 10:02:40,804][inference][INFO] - + Forward pass max memory reserved: 538 (MB)
[2024-01-13 10:02:40,805][inference][INFO] - + Forward pass max memory allocated: 497 (MB)
[2024-01-13 10:02:40,805][inference][INFO] - + Preparing input for the generation pass
[2024-01-13 10:02:40,805][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 10:02:40,805][inference][INFO] - + Tracking generation pass peak memory
[2024-01-13 10:02:40,805][memory][INFO] - Tracking CUDA devices: [0]
[2024-01-13 10:02:40,805][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-01-13 10:02:42,011][inference][INFO] - + Generation pass max memory used: 1408 (MB)
[2024-01-13 10:02:42,011][inference][INFO] - + Generation pass max memory reserved: 549 (MB)
[2024-01-13 10:02:42,011][inference][INFO] - + Generation pass max memory allocated: 500 (MB)
[2024-01-13 10:02:42,012][inference][INFO] - + Preparing input for the forward pass
[2024-01-13 10:02:42,012][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 10:02:42,012][inference][INFO] - + Warming up the forward pass
[2024-01-13 10:02:42,051][inference][INFO] - + Tracking forward pass latency and throughput
[2024-01-13 10:02:52,114][inference][INFO] - + Forward pass latency: 3.88e-03 (s)
[2024-01-13 10:02:52,115][inference][INFO] - + Forward pass throughput: 258.00 (samples/s)
[2024-01-13 10:02:52,116][inference][INFO] - + Preparing input for the generation pass
[2024-01-13 10:02:52,116][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 10:02:52,116][inference][INFO] - + Warming up the generation pass
[2024-01-13 10:02:53,161][inference][INFO] - + Tracking generation latency and throughput
[2024-01-13 10:03:03,606][inference][INFO] - + Generation pass latency: 1.04e+00 (s)
[2024-01-13 10:03:03,607][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
[2024-01-13 10:03:03,607][inference][INFO] - + Preparing input for the forward pass
[2024-01-13 10:03:03,607][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 10:03:03,607][inference][INFO] - + Tracking forward pass energy consumption
[2024-01-13 10:03:18,072][inference][INFO] - + Forward pass energy consumption: 1.96e-07 (kWh/sample)
[2024-01-13 10:03:18,072][inference][INFO] - + Forward pass carbon emissions: 1.32e-08 (kgCO2eq/sample)
[2024-01-13 10:03:18,072][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/forward_codecarbon.csv
[2024-01-13 10:03:18,072][inference][INFO] - + Preparing input for the generation pass
[2024-01-13 10:03:18,072][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 10:03:18,072][inference][INFO] - + Tracking generation pass energy consumption
[2024-01-13 10:03:32,915][inference][INFO] - + Generation pass energy consumption: 1.36e-07 (kWh/token)
[2024-01-13 10:03:32,915][inference][INFO] - + Generation pass carbon emissions: 9.18e-09 (kgCO2eq/token)
[2024-01-13 10:03:32,915][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/generate_codecarbon.csv
[2024-01-13 10:03:32,915][inference][INFO] - Saving results
[2024-01-13 10:03:32,920][backend][INFO] - Cleaning pytorch backend
[2024-01-13 10:03:32,920][backend][INFO] - + Deleting pretrained model
[2024-01-13 10:03:33,009][pytorch][INFO] - + Emptying CUDA cache
[2024-01-13 10:03:33,021][pytorch][INFO] - + Cleaning temporary directory
[2024-01-13 10:03:33,524][isolation][INFO] - + Closing device(s) isolation process...
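Note: the throughput figures logged above follow directly from the logged latencies, assuming batch_size=1 and 256 new tokens per generation as configured for this experiment. A minimal arithmetic sketch:

# Values taken from the cli.log lines above; batch size and token count are the
# experiment settings (input_shapes.batch_size=1, new_tokens=256).
forward_latency_s = 3.88e-03
generation_latency_s = 1.04
batch_size, new_tokens = 1, 256

print(round(batch_size / forward_latency_s))                   # ~258 samples/s, as logged
print(round(new_tokens * batch_size / generation_latency_s))   # ~246 tokens/s, as logged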
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/forward_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-01-13T10:03:18,codecarbon,33df1861-3592-49c0-b3b6-984d36e07e43,10.035578966140747,3.502802899658431e-05,3.490384472561685e-06,42.5,0.0,0.35994815826416016,0.00011847292797433006,0.0004003289313736502,9.789537783635183e-07,0.0005197808131263438,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/generate_codecarbon.csv
ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2024-01-13T10:03:32,codecarbon,e685eaea-e321-476e-a28b-1a62f6e369fd,10.400731801986694,2.34910579187555e-05,2.2585966416582685e-06,42.5,66.37527037995254,0.3602113723754883,0.00012278397960795298,0.00022478351316013168,1.0162424857437222e-06,0.00034858373525382844,France,FRA,île-de-france,,,Linux-5.15.0-91-generic-x86_64-with-glibc2.35,3.10.12,2.3.2,32,AMD Ryzen 9 7950X 16-Core Processor,1,1 x NVIDIA GeForce RTX 4090,2.4075,48.8323,125.53921508789062,process,N,1.0
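Note: the kWh/token figure reported in cli.log appears consistent with dividing this CSV's energy_consumed by the number of tokens generated during the tracked window. The token count below is an assumption derived from the reported 1.04 s generation latency and 256 new tokens per generation, not something stated explicitly in the files:

# Rough reconstruction using the CSV row above and the cli.log figures.
energy_consumed_kwh = 0.00034858373525382844  # energy_consumed column
tracking_duration_s = 10.400731801986694      # duration column
generation_latency_s = 1.04                   # generation pass latency from cli.log
new_tokens = 256                              # new tokens per generation in this experiment

tokens_generated = (tracking_duration_s / generation_latency_s) * new_tokens
print(energy_consumed_kwh / tokens_generated)  # ~1.36e-07 kWh/token, matching cli.log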
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/hydra_config.yaml
ADDED
@@ -0,0 +1,84 @@
launcher:
name: process
_target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
device_isolation: true
start_method: spawn
backend:
name: pytorch
version: 2.1.2+cu118
_target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
seed: 42
inter_op_num_threads: null
intra_op_num_threads: null
delete_cache: false
no_weights: true
device_map: null
torch_dtype: float16
eval_mode: true
disable_grad: true
amp_autocast: false
amp_dtype: null
torch_compile: false
torch_compile_config: {}
to_bettertransformer: false
use_flash_attention_2: false
quantization_scheme: null
quantization_config: {}
data_parallel: false
deepspeed_inference: false
deepspeed_inference_config: {}
peft_strategy: null
peft_config: {}
benchmark:
name: inference
_target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
duration: 10
warmup_runs: 10
memory: true
energy: true
input_shapes:
batch_size: 1
sequence_length: 256
num_choices: 1
feature_size: 80
nb_max_frames: 3000
audio_sequence_length: 16000
new_tokens: 256
can_diffuse: false
can_generate: true
forward_kwargs: {}
generate_kwargs:
num_return_sequences: 1
max_new_tokens: 256
min_new_tokens: 256
do_sample: false
use_cache: true
pad_token_id: 0
temperature: 1.0
num_beams: 1
experiment_name: pytorch+cuda+float16
device: cuda
model: BEE-spoke-data/smol_llama-220M-GQA
task: text-generation
hub_kwargs:
revision: main
cache_dir: null
force_download: false
local_files_only: false
trust_remote_code: true
environment:
optimum_version: 1.16.1
optimum_commit: null
transformers_version: 4.36.2
transformers_commit: null
accelerate_version: 0.26.1
accelerate_commit: null
diffusers_version: null
diffusers_commit: null
python_version: 3.10.12
system: Linux
cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
cpu_count: 32
cpu_ram_mb: 134796
gpus:
- NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/inference_results.csv
ADDED
@@ -0,0 +1,2 @@
forward.latency(s),forward.throughput(samples/s),forward.peak_memory(MB),forward.max_memory_used(MB),forward.max_memory_allocated(MB),forward.max_memory_reserved(MB),forward.energy_consumption(kWh/sample),forward.carbon_emissions(kgCO2eq/sample),generate.latency(s),generate.throughput(tokens/s),decode.latency(s),decode.throughput(tokens/s),generate.peak_memory(MB),generate.max_memory_used(MB),generate.max_memory_allocated(MB),generate.max_memory_reserved(MB),generate.energy_consumption(kWh/token),generate.carbon_emissions(kgCO2eq/token)
0.00388,258.0,1397,1397,497,538,1.96e-07,1.32e-08,1.04,246.0,1.04,245.0,1408,1408,500,549,1.36e-07,9.18e-09
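Note: a minimal sketch for reading one model's inference_results.csv, assuming pandas is installed and the dataset repository is checked out locally (the relative path is the file added in this commit):

import pandas as pd

path = "audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-220M-GQA/inference_results.csv"
row = pd.read_csv(path).iloc[0]  # single-row file: one benchmark run per model
print(row["forward.latency(s)"], row["generate.throughput(tokens/s)"])
# 0.00388 246.0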
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied/.hydra/config.yaml
ADDED
@@ -0,0 +1,76 @@
backend:
name: pytorch
version: ${pytorch_version:}
_target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
seed: 42
inter_op_num_threads: null
intra_op_num_threads: null
delete_cache: false
no_weights: true
device_map: null
torch_dtype: float16
eval_mode: ${is_inference:${benchmark.name}}
disable_grad: ${is_inference:${benchmark.name}}
amp_autocast: false
amp_dtype: null
torch_compile: false
torch_compile_config: {}
to_bettertransformer: false
use_flash_attention_2: false
quantization_scheme: null
quantization_config: {}
data_parallel: false
deepspeed_inference: false
deepspeed_inference_config: {}
peft_strategy: null
peft_config: {}
benchmark:
name: inference
_target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
duration: 10
warmup_runs: 10
memory: true
energy: true
input_shapes:
batch_size: 1
sequence_length: 256
num_choices: 1
feature_size: 80
nb_max_frames: 3000
audio_sequence_length: 16000
new_tokens: 256
can_diffuse: ${can_diffuse:${task}}
can_generate: ${can_generate:${task}}
forward_kwargs: {}
generate_kwargs: {}
launcher:
name: process
_target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
device_isolation: true
start_method: spawn
experiment_name: pytorch+cuda+float16
device: cuda
model: BEE-spoke-data/smol_llama-81M-tied
task: ${infer_task:${model}}
hub_kwargs:
revision: main
cache_dir: null
force_download: false
local_files_only: false
trust_remote_code: true
environment:
optimum_version: 1.16.1
optimum_commit: null
transformers_version: 4.36.2
transformers_commit: null
accelerate_version: 0.26.1
accelerate_commit: null
diffusers_version: null
diffusers_commit: null
python_version: 3.10.12
system: Linux
cpu: ' AMD Ryzen 9 7950X 16-Core Processor'
cpu_count: 32
cpu_ram_mb: 134796
gpus:
- NVIDIA GeForce RTX 4090
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,176 @@
hydra:
run:
dir: dataset/${oc.env:HOSTNAME}/${experiment_name}/${model}
sweep:
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
subdir: ${hydra.job.num}
launcher:
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
sweeper:
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
max_batch_size: null
params: null
help:
app_name: ${hydra.job.name}
header: '${hydra.help.app_name} is powered by Hydra.

'
footer: 'Powered by Hydra (https://hydra.cc)

Use --hydra-help to view Hydra specific help

'
template: '${hydra.help.header}

== Configuration groups ==

Compose your configuration from those groups (group=option)


$APP_CONFIG_GROUPS


== Config ==

Override anything in the config (foo.bar=value)


$CONFIG


${hydra.help.footer}

'
hydra_help:
template: 'Hydra (${hydra.runtime.version})

See https://hydra.cc for more info.


== Flags ==

$FLAGS_HELP


== Configuration groups ==

Compose your configuration from those groups (For example, append hydra/job_logging=disabled
to command line)


$HYDRA_CONFIG_GROUPS


Use ''--cfg hydra'' to Show the Hydra config.

'
hydra_help: ???
hydra_logging:
version: 1
formatters:
colorlog:
(): colorlog.ColoredFormatter
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
handlers:
console:
class: logging.StreamHandler
formatter: colorlog
stream: ext://sys.stdout
root:
level: INFO
handlers:
- console
disable_existing_loggers: false
job_logging:
version: 1
formatters:
simple:
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
colorlog:
(): colorlog.ColoredFormatter
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- %(message)s'
log_colors:
DEBUG: purple
INFO: green
WARNING: yellow
ERROR: red
CRITICAL: red
handlers:
console:
class: logging.StreamHandler
formatter: colorlog
stream: ext://sys.stdout
file:
class: logging.FileHandler
formatter: simple
filename: ${hydra.job.name}.log
root:
level: INFO
handlers:
- console
- file
disable_existing_loggers: false
env: {}
mode: RUN
searchpath: []
callbacks: {}
output_subdir: .hydra
overrides:
hydra:
- hydra.mode=RUN
task:
- model=BEE-spoke-data/smol_llama-81M-tied
job:
name: cli
chdir: true
override_dirname: model=BEE-spoke-data/smol_llama-81M-tied
id: ???
num: ???
config_name: pytorch+cuda+float16
env_set:
COUNTRY_ISO_CODE: FRA
OVERRIDE_BENCHMARKS: '0'
CUDA_VISIBLE_DEVICES: '0'
CUDA_DEVICE_ORDER: PCI_BUS_ID
env_copy: []
config:
override_dirname:
kv_sep: '='
item_sep: ','
exclude_keys: []
runtime:
version: 1.3.2
version_base: '1.3'
cwd: /workspace/llm-perf
config_sources:
- path: hydra.conf
schema: pkg
provider: hydra
- path: optimum_benchmark
schema: pkg
provider: main
- path: hydra_plugins.hydra_colorlog.conf
schema: pkg
provider: hydra-colorlog
- path: /workspace/llm-perf/configs
schema: file
provider: command-line
- path: ''
schema: structured
provider: schema
output_dir: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied
choices:
launcher: process
benchmark: inference
backend: pytorch
hydra/env: default
hydra/callbacks: null
hydra/job_logging: colorlog
hydra/hydra_logging: colorlog
hydra/hydra_help: default
hydra/help: default
hydra/sweeper: basic
hydra/launcher: basic
hydra/output: default
verbose: false
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
- model=BEE-spoke-data/smol_llama-81M-tied
audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied/cli.log
ADDED
@@ -0,0 +1,72 @@
[2024-01-13 09:42:08,442][launcher][INFO] - Configuring process launcher
[2024-01-13 09:42:08,442][process][INFO] - Setting multiprocessing start method to spawn.
[2024-01-13 09:42:08,444][process][INFO] - + Launched worker process with PID 495096.
[2024-01-13 09:42:08,444][isolation][INFO] - + Launched device(s) isolation process 495097.
[2024-01-13 09:42:10,300][numexpr.utils][INFO] - Note: NumExpr detected 32 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
[2024-01-13 09:42:10,300][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
[2024-01-13 09:42:10,443][datasets][INFO] - PyTorch version 2.1.2+cu118 available.
[2024-01-13 09:42:11,633][backend][INFO] - Configuring pytorch backend
[2024-01-13 09:42:11,633][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
[2024-01-13 09:42:11,633][pytorch][INFO] - + Disabling gradients
[2024-01-13 09:42:11,633][pytorch][INFO] - + Loading model with no weights
[2024-01-13 09:42:11,633][pytorch][INFO] - + Creating no weights model directory
[2024-01-13 09:42:11,633][pytorch][INFO] - + Saving pretrained config
[2024-01-13 09:42:11,634][pytorch][INFO] - + Creating no weights model
[2024-01-13 09:42:11,634][pytorch][INFO] - + Saving no weights model
[2024-01-13 09:42:11,635][pytorch][INFO] - + Loading no weights model
[2024-01-13 09:42:11,635][pytorch][INFO] - + Loading model directly on device: cuda
[2024-01-13 09:42:11,751][pytorch][INFO] - + Randomizing model weights
[2024-01-13 09:42:11,751][pytorch][INFO] - + Tying model weights after randomization
[2024-01-13 09:42:11,752][pytorch][INFO] - + Turning on model's eval mode
[2024-01-13 09:42:11,812][benchmark][INFO] - Configuring inference benchmark
[2024-01-13 09:42:11,812][inference][INFO] - Running inference benchmark
[2024-01-13 09:42:11,812][inference][INFO] - + Updating input shapes with model shapes
[2024-01-13 09:42:11,812][inference][INFO] - + Preparing backend for inference
[2024-01-13 09:42:11,813][inference][INFO] - + Creating input generator
[2024-01-13 09:42:11,813][input-generator][INFO] - Using text-generation task generator
[2024-01-13 09:42:11,813][inference][INFO] - + Preparing input for the forward pass
[2024-01-13 09:42:11,813][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 09:42:11,813][inference][INFO] - + Tracking forward pass peak memory
[2024-01-13 09:42:11,813][memory][INFO] - Tracking CUDA devices: [0]
[2024-01-13 09:42:11,813][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-01-13 09:42:11,978][inference][INFO] - + Forward pass max memory used: 1118 (MB)
[2024-01-13 09:42:11,978][inference][INFO] - + Forward pass max memory reserved: 260 (MB)
[2024-01-13 09:42:11,978][inference][INFO] - + Forward pass max memory allocated: 232 (MB)
[2024-01-13 09:42:11,978][inference][INFO] - + Preparing input for the generation pass
[2024-01-13 09:42:11,978][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 09:42:11,978][inference][INFO] - + Tracking generation pass peak memory
[2024-01-13 09:42:11,978][memory][INFO] - Tracking CUDA devices: [0]
[2024-01-13 09:42:11,978][memory][INFO] - Tracking Pytorch CUDA devices: [0]
[2024-01-13 09:42:12,772][inference][INFO] - + Generation pass max memory used: 1135 (MB)
[2024-01-13 09:42:12,772][inference][INFO] - + Generation pass max memory reserved: 276 (MB)
[2024-01-13 09:42:12,772][inference][INFO] - + Generation pass max memory allocated: 232 (MB)
[2024-01-13 09:42:12,772][inference][INFO] - + Preparing input for the forward pass
[2024-01-13 09:42:12,772][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 09:42:12,772][inference][INFO] - + Warming up the forward pass
[2024-01-13 09:42:12,794][inference][INFO] - + Tracking forward pass latency and throughput
[2024-01-13 09:42:22,918][inference][INFO] - + Forward pass latency: 2.24e-03 (s)
[2024-01-13 09:42:22,920][inference][INFO] - + Forward pass throughput: 446.00 (samples/s)
[2024-01-13 09:42:22,920][inference][INFO] - + Preparing input for the generation pass
[2024-01-13 09:42:22,920][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 09:42:22,920][inference][INFO] - + Warming up the generation pass
[2024-01-13 09:42:23,547][inference][INFO] - + Tracking generation latency and throughput
[2024-01-13 09:42:33,554][inference][INFO] - + Generation pass latency: 6.25e-01 (s)
[2024-01-13 09:42:33,554][inference][INFO] - + Generation pass throughput: 410.00 (tokens/s)
[2024-01-13 09:42:33,554][inference][INFO] - + Preparing input for the forward pass
[2024-01-13 09:42:33,554][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 09:42:33,554][inference][INFO] - + Tracking forward pass energy consumption
[2024-01-13 09:42:48,010][inference][INFO] - + Forward pass energy consumption: 1.06e-07 (kWh/sample)
[2024-01-13 09:42:48,010][inference][INFO] - + Forward pass carbon emissions: 7.14e-09 (kgCO2eq/sample)
[2024-01-13 09:42:48,010][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied/forward_codecarbon.csv
[2024-01-13 09:42:48,010][inference][INFO] - + Preparing input for the generation pass
[2024-01-13 09:42:48,010][backend][INFO] - + Moving inputs tensors to device cuda
[2024-01-13 09:42:48,010][inference][INFO] - + Tracking generation pass energy consumption
[2024-01-13 09:43:03,094][inference][INFO] - + Generation pass energy consumption: 7.33e-08 (kWh/token)
[2024-01-13 09:43:03,094][inference][INFO] - + Generation pass carbon emissions: 4.94e-09 (kgCO2eq/token)
[2024-01-13 09:43:03,094][inference][INFO] - + Full details in the CodeCarbon report: /workspace/llm-perf/dataset/audace/pytorch+cuda+float16/BEE-spoke-data/smol_llama-81M-tied/generate_codecarbon.csv
[2024-01-13 09:43:03,094][inference][INFO] - Saving results
[2024-01-13 09:43:03,101][backend][INFO] - Cleaning pytorch backend
[2024-01-13 09:43:03,101][backend][INFO] - + Deleting pretrained model
[2024-01-13 09:43:03,186][pytorch][INFO] - + Emptying CUDA cache
[2024-01-13 09:43:03,195][pytorch][INFO] - + Cleaning temporary directory
[2024-01-13 09:43:03,689][isolation][INFO] - + Closing device(s) isolation process...