IlyasMoutawwakil HF staff committed on
Commit
3877d59
·
verified ·
1 Parent(s): 7c67340

Upload cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

Browse files
cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -3,7 +3,7 @@
3
  "name": "cuda_training_transformers_text-generation_openai-community/gpt2",
4
  "backend": {
5
  "name": "pytorch",
6
- "version": "2.3.1+rocm5.7",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
  "library": "transformers",
@@ -117,33 +117,33 @@
117
  "overall": {
118
  "memory": {
119
  "unit": "MB",
120
- "max_ram": 1293.582336,
121
- "max_global_vram": 3344.150528,
122
- "max_process_vram": 346535.256064,
123
- "max_reserved": 2894.06976,
124
  "max_allocated": 2506.73664
125
  },
126
  "latency": {
127
  "unit": "s",
128
  "count": 5,
129
- "total": 0.7668404998779297,
130
- "mean": 0.15336809997558593,
131
- "stdev": 0.22141835623342185,
132
- "p50": 0.04294402313232422,
133
- "p90": 0.37508600769042977,
134
- "p95": 0.48564432220458975,
135
- "p99": 0.5740909738159179,
136
  "values": [
137
- 0.59620263671875,
138
- 0.04135843276977539,
139
- 0.04341106414794922,
140
- 0.04294402313232422,
141
- 0.04292434310913086
142
  ]
143
  },
144
  "throughput": {
145
  "unit": "samples/s",
146
- "value": 65.20260733224094
147
  },
148
  "energy": null,
149
  "efficiency": null
@@ -151,30 +151,30 @@
151
  "warmup": {
152
  "memory": {
153
  "unit": "MB",
154
- "max_ram": 1293.582336,
155
- "max_global_vram": 3344.150528,
156
- "max_process_vram": 346535.256064,
157
- "max_reserved": 2894.06976,
158
  "max_allocated": 2506.73664
159
  },
160
  "latency": {
161
  "unit": "s",
162
  "count": 2,
163
- "total": 0.6375610694885253,
164
- "mean": 0.31878053474426266,
165
- "stdev": 0.27742210197448725,
166
- "p50": 0.31878053474426266,
167
- "p90": 0.5407182163238525,
168
- "p95": 0.5684604265213012,
169
- "p99": 0.5906541946792602,
170
  "values": [
171
- 0.59620263671875,
172
- 0.04135843276977539
173
  ]
174
  },
175
  "throughput": {
176
  "unit": "samples/s",
177
- "value": 12.547817586192789
178
  },
179
  "energy": null,
180
  "efficiency": null
@@ -182,31 +182,31 @@
182
  "train": {
183
  "memory": {
184
  "unit": "MB",
185
- "max_ram": 1293.582336,
186
- "max_global_vram": 3344.150528,
187
- "max_process_vram": 346535.256064,
188
- "max_reserved": 2894.06976,
189
  "max_allocated": 2506.73664
190
  },
191
  "latency": {
192
  "unit": "s",
193
  "count": 3,
194
- "total": 0.12927943038940432,
195
- "mean": 0.043093143463134774,
196
- "stdev": 0.0002249473971145129,
197
- "p50": 0.04294402313232422,
198
- "p90": 0.04331765594482422,
199
- "p95": 0.04336436004638672,
200
- "p99": 0.04340172332763672,
201
  "values": [
202
- 0.04341106414794922,
203
- 0.04294402313232422,
204
- 0.04292434310913086
205
  ]
206
  },
207
  "throughput": {
208
  "unit": "samples/s",
209
- "value": 139.23328673232822
210
  },
211
  "energy": null,
212
  "efficiency": null
 
3
  "name": "cuda_training_transformers_text-generation_openai-community/gpt2",
4
  "backend": {
5
  "name": "pytorch",
6
+ "version": "2.4.0+rocm6.1",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
  "library": "transformers",
 
117
  "overall": {
118
  "memory": {
119
  "unit": "MB",
120
+ "max_ram": 1640.144896,
121
+ "max_global_vram": 11.239424,
122
+ "max_process_vram": 0.0,
123
+ "max_reserved": 2889.875456,
124
  "max_allocated": 2506.73664
125
  },
126
  "latency": {
127
  "unit": "s",
128
  "count": 5,
129
+ "total": 0.9606382255554198,
130
+ "mean": 0.19212764511108396,
131
+ "stdev": 0.2875477069341439,
132
+ "p50": 0.04804556655883789,
133
+ "p90": 0.48009199523925783,
134
+ "p95": 0.6236570205688475,
135
+ "p99": 0.7385090408325194,
136
  "values": [
137
+ 0.7672220458984375,
138
+ 0.04939691925048828,
139
+ 0.04803228759765625,
140
+ 0.04794140625,
141
+ 0.04804556655883789
142
  ]
143
  },
144
  "throughput": {
145
  "unit": "samples/s",
146
+ "value": 52.04873038556331
147
  },
148
  "energy": null,
149
  "efficiency": null
 
151
  "warmup": {
152
  "memory": {
153
  "unit": "MB",
154
+ "max_ram": 1640.144896,
155
+ "max_global_vram": 11.239424,
156
+ "max_process_vram": 0.0,
157
+ "max_reserved": 2889.875456,
158
  "max_allocated": 2506.73664
159
  },
160
  "latency": {
161
  "unit": "s",
162
  "count": 2,
163
+ "total": 0.8166189651489257,
164
+ "mean": 0.40830948257446287,
165
+ "stdev": 0.3589125633239746,
166
+ "p50": 0.40830948257446287,
167
+ "p90": 0.6954395332336425,
168
+ "p95": 0.7313307895660399,
169
+ "p99": 0.7600437946319579,
170
  "values": [
171
+ 0.7672220458984375,
172
+ 0.04939691925048828
173
  ]
174
  },
175
  "throughput": {
176
  "unit": "samples/s",
177
+ "value": 9.796490580574565
178
  },
179
  "energy": null,
180
  "efficiency": null
 
182
  "train": {
183
  "memory": {
184
  "unit": "MB",
185
+ "max_ram": 1640.144896,
186
+ "max_global_vram": 11.239424,
187
+ "max_process_vram": 0.0,
188
+ "max_reserved": 2889.875456,
189
  "max_allocated": 2506.73664
190
  },
191
  "latency": {
192
  "unit": "s",
193
  "count": 3,
194
+ "total": 0.14401926040649415,
195
+ "mean": 0.04800642013549805,
196
+ "stdev": 4.6290291877677313e-05,
197
+ "p50": 0.04803228759765625,
198
+ "p90": 0.04804291076660156,
199
+ "p95": 0.04804423866271973,
200
+ "p99": 0.04804530097961426,
201
  "values": [
202
+ 0.04803228759765625,
203
+ 0.04794140625,
204
+ 0.04804556655883789
205
  ]
206
  },
207
  "throughput": {
208
  "unit": "samples/s",
209
+ "value": 124.98328313306864
210
  },
211
  "energy": null,
212
  "efficiency": null