IlyasMoutawwakil committed
Commit bac4fb3 · verified · 1 Parent(s): 36a54de

Upload cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub
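The commit message notes the file was pushed with huggingface_hub. As a rough illustration only, an upload like this is typically done with HfApi.upload_file; the repo_id and local path below are placeholders and are not taken from this commit:

    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_file(
        path_or_fileobj="benchmark.json",  # local result file (placeholder path)
        path_in_repo="cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json",
        repo_id="IlyasMoutawwakil/benchmarks",  # placeholder repo_id, not from this commit
        repo_type="dataset",  # assumption: benchmark results live in a dataset repo
        commit_message="Upload cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub",
    )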

cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json CHANGED
@@ -3,7 +3,7 @@
   "name": "cuda_training_transformers_multiple-choice_FacebookAI/roberta-base",
   "backend": {
     "name": "pytorch",
-    "version": "2.3.1+rocm5.7",
+    "version": "2.4.0+rocm6.1",
     "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
     "task": "multiple-choice",
     "library": "transformers",
@@ -117,33 +117,33 @@
   "overall": {
     "memory": {
       "unit": "MB",
-      "max_ram": 1276.633088,
-      "max_global_vram": 3154.223104,
-      "max_process_vram": 290500.026368,
+      "max_ram": 1644.572672,
+      "max_global_vram": 11.276288,
+      "max_process_vram": 0.0,
       "max_reserved": 2707.423232,
       "max_allocated": 2497.88416
     },
     "latency": {
       "unit": "s",
       "count": 5,
-      "total": 0.7486629486083984,
-      "mean": 0.1497325897216797,
-      "stdev": 0.2112550541838403,
-      "p50": 0.04386098098754883,
-      "p90": 0.36172220306396485,
-      "p95": 0.4669802116394042,
-      "p99": 0.5511866184997558,
+      "total": 0.7172655601501464,
+      "mean": 0.14345311203002928,
+      "stdev": 0.1951542751167389,
+      "p50": 0.046348625183105466,
+      "p90": 0.3388419570922852,
+      "p95": 0.43630122756958,
+      "p99": 0.514268643951416,
       "values": [
-        0.5722382202148437,
-        0.04594817733764649,
-        0.043338584899902347,
-        0.04327698516845703,
-        0.04386098098754883
+        0.533760498046875,
+        0.04646414566040039,
+        0.04529502487182617,
+        0.04539726638793945,
+        0.046348625183105466
       ]
     },
     "throughput": {
       "unit": "samples/s",
-      "value": 66.78572793396431
+      "value": 69.7091883089067
     },
     "energy": null,
     "efficiency": null
@@ -151,30 +151,30 @@
   "warmup": {
     "memory": {
       "unit": "MB",
-      "max_ram": 1276.633088,
-      "max_global_vram": 3154.223104,
-      "max_process_vram": 290500.026368,
+      "max_ram": 1644.572672,
+      "max_global_vram": 11.276288,
+      "max_process_vram": 0.0,
       "max_reserved": 2707.423232,
       "max_allocated": 2497.88416
     },
     "latency": {
       "unit": "s",
       "count": 2,
-      "total": 0.6181863975524902,
-      "mean": 0.3090931987762451,
-      "stdev": 0.2631450214385986,
-      "p50": 0.3090931987762451,
-      "p90": 0.519609215927124,
-      "p95": 0.5459237180709838,
-      "p99": 0.5669753197860717,
+      "total": 0.5802246437072753,
+      "mean": 0.29011232185363767,
+      "stdev": 0.2436481761932373,
+      "p50": 0.29011232185363767,
+      "p90": 0.48503086280822755,
+      "p95": 0.5093956804275512,
+      "p99": 0.5288875345230102,
       "values": [
-        0.5722382202148437,
-        0.04594817733764649
+        0.533760498046875,
+        0.04646414566040039
       ]
     },
     "throughput": {
       "unit": "samples/s",
-      "value": 12.941080605580165
+      "value": 13.787763216820585
     },
     "energy": null,
     "efficiency": null
@@ -182,31 +182,31 @@
   "train": {
     "memory": {
       "unit": "MB",
-      "max_ram": 1276.633088,
-      "max_global_vram": 3154.223104,
-      "max_process_vram": 290500.026368,
+      "max_ram": 1644.572672,
+      "max_global_vram": 11.276288,
+      "max_process_vram": 0.0,
       "max_reserved": 2707.423232,
       "max_allocated": 2497.88416
     },
     "latency": {
       "unit": "s",
       "count": 3,
-      "total": 0.13047655105590822,
-      "mean": 0.04349218368530274,
-      "stdev": 0.00026198882843425593,
-      "p50": 0.043338584899902347,
-      "p90": 0.04375650177001953,
-      "p95": 0.04380874137878418,
-      "p99": 0.0438505330657959,
+      "total": 0.13704091644287109,
+      "mean": 0.04568030548095703,
+      "stdev": 0.00047441314632861044,
+      "p50": 0.04539726638793945,
+      "p90": 0.04615835342407226,
+      "p95": 0.046253489303588864,
+      "p99": 0.04632959800720215,
       "values": [
-        0.043338584899902347,
-        0.04327698516845703,
-        0.04386098098754883
+        0.04529502487182617,
+        0.04539726638793945,
+        0.046348625183105466
       ]
     },
     "throughput": {
       "unit": "samples/s",
-      "value": 137.95582312937697
+      "value": 131.34763300786702
    },
     "energy": null,
     "efficiency": null