IlyasMoutawwakil committed (verified)
Commit 2a23e9e · Parent: 6a5a4a9

Upload cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json with huggingface_hub

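For context, this is a minimal sketch of how a result file like this one can be pushed to the Hub with `huggingface_hub`'s `HfApi.upload_file`; it is not the uploader's exact script, and the `repo_id` below is a placeholder, not the actual target repository of this commit.

```python
# Sketch only: upload a local benchmark.json to a Hub dataset repository.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result file produced by the benchmark run
    path_in_repo="cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json",
    repo_id="my-org/my-benchmark-dataset",  # hypothetical repo_id, for illustration only
    repo_type="dataset",
    commit_message="Upload benchmark.json with huggingface_hub",
)
```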
cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json CHANGED
@@ -3,7 +3,7 @@
  "name": "cuda_training_transformers_fill-mask_google-bert/bert-base-uncased",
  "backend": {
  "name": "pytorch",
- "version": "2.4.0+rocm6.1",
+ "version": "2.2.2+rocm5.7",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "fill-mask",
  "library": "transformers",
@@ -110,33 +110,33 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 1627.81184,
- "max_global_vram": 0.0,
- "max_process_vram": 0.0,
+ "max_ram": 1137.258496,
+ "max_global_vram": 2953.19552,
+ "max_process_vram": 308401.446912,
  "max_reserved": 2497.708032,
  "max_allocated": 2195.345408
  },
  "latency": {
  "unit": "s",
  "count": 5,
- "total": 0.7329203910827637,
- "mean": 0.14658407821655275,
- "stdev": 0.2058411432508353,
- "p50": 0.043722270965576175,
- "p90": 0.35263223724365245,
- "p95": 0.45544905319213863,
- "p99": 0.5377025059509277,
+ "total": 0.6839748382568359,
+ "mean": 0.13679496765136717,
+ "stdev": 0.19247733518636917,
+ "p50": 0.04070725250244141,
+ "p90": 0.32934854431152344,
+ "p95": 0.4255490280151366,
+ "p99": 0.5025094149780273,
  "values": [
- 0.558265869140625,
- 0.04418178939819336,
- 0.043722270965576175,
- 0.04356467056274414,
- 0.043185791015625
+ 0.52174951171875,
+ 0.04070725250244141,
+ 0.04034069061279297,
+ 0.04043029022216797,
+ 0.04074709320068359
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 68.2202332045007
+ "value": 73.10210435142463
  },
  "energy": null,
  "efficiency": null
@@ -144,30 +144,30 @@
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 1627.81184,
- "max_global_vram": 0.0,
- "max_process_vram": 0.0,
+ "max_ram": 1137.258496,
+ "max_global_vram": 2953.19552,
+ "max_process_vram": 308401.446912,
  "max_reserved": 2497.708032,
  "max_allocated": 2195.345408
  },
  "latency": {
  "unit": "s",
  "count": 2,
- "total": 0.6024476585388184,
- "mean": 0.3012238292694092,
- "stdev": 0.2570420398712159,
- "p50": 0.3012238292694092,
- "p90": 0.5068574611663819,
- "p95": 0.5325616651535034,
- "p99": 0.5531250283432008,
+ "total": 0.5624567642211914,
+ "mean": 0.2812283821105957,
+ "stdev": 0.24052112960815428,
+ "p50": 0.28122838211059564,
+ "p90": 0.4736452857971191,
+ "p95": 0.4976973987579345,
+ "p99": 0.5169390891265868,
  "values": [
- 0.558265869140625,
- 0.04418178939819336
+ 0.52174951171875,
+ 0.04070725250244141
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 13.27916190993798
+ "value": 14.223315477550067
  },
  "energy": null,
  "efficiency": null
@@ -175,31 +175,31 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 1627.81184,
- "max_global_vram": 0.0,
- "max_process_vram": 0.0,
+ "max_ram": 1137.258496,
+ "max_global_vram": 2953.19552,
+ "max_process_vram": 308401.446912,
  "max_reserved": 2497.708032,
  "max_allocated": 2195.345408
  },
  "latency": {
  "unit": "s",
  "count": 3,
- "total": 0.1304727325439453,
- "mean": 0.04349091084798177,
- "stdev": 0.00022514151966599846,
- "p50": 0.04356467056274414,
- "p90": 0.04369075088500977,
- "p95": 0.04370651092529297,
- "p99": 0.04371911895751954,
+ "total": 0.12151807403564453,
+ "mean": 0.04050602467854818,
+ "stdev": 0.00017434170809735167,
+ "p50": 0.04043029022216797,
+ "p90": 0.04068373260498047,
+ "p95": 0.04071541290283203,
+ "p99": 0.04074075714111328,
  "values": [
- 0.043722270965576175,
- 0.04356467056274414,
- 0.043185791015625
+ 0.04034069061279297,
+ 0.04043029022216797,
+ 0.04074709320068359
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 137.9598606470307
+ "value": 148.12611327859025
  },
  "energy": null,
  "efficiency": null