IlyasMoutawwakil committed (verified) · commit 808523a · 1 parent: b5a5a39

Upload cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json with huggingface_hub

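The commit message says the file was pushed with huggingface_hub. A minimal sketch of such an upload via `HfApi.upload_file` is shown below; the `repo_id` and `repo_type` are placeholders/assumptions, not taken from this commit.

```python
# Minimal sketch (not the exact upload script used for this commit):
# pushing a benchmark.json result file with huggingface_hub.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result file produced by optimum-benchmark
    path_in_repo="cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json",
    repo_id="<user-or-org>/<benchmark-results-repo>",  # placeholder, not from the commit
    repo_type="dataset",  # assumption: benchmark-result repos of this kind are usually datasets
    commit_message=(
        "Upload cuda_training_transformers_fill-mask_google-bert/"
        "bert-base-uncased/benchmark.json with huggingface_hub"
    ),
)
```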
cuda_training_transformers_fill-mask_google-bert/bert-base-uncased/benchmark.json CHANGED
@@ -3,7 +3,7 @@
  "name": "cuda_training_transformers_fill-mask_google-bert/bert-base-uncased",
  "backend": {
  "name": "pytorch",
- "version": "2.2.2",
+ "version": "2.3.0+cu121",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "fill-mask",
  "model": "google-bert/bert-base-uncased",
@@ -81,7 +81,7 @@
  "machine": "x86_64",
  "platform": "Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35",
  "processor": "x86_64",
- "python_version": "3.10.14",
+ "python_version": "3.10.12",
  "gpu": [
  "NVIDIA A10G"
  ],
@@ -107,7 +107,7 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 1063.75168,
+ "max_ram": 1080.045568,
  "max_global_vram": 3169.32096,
  "max_process_vram": 0.0,
  "max_reserved": 2520.776704,
@@ -116,24 +116,24 @@
  "latency": {
  "unit": "s",
  "count": 5,
- "total": 0.868150291442871,
- "mean": 0.1736300582885742,
- "stdev": 0.25364250009278855,
- "p50": 0.046750720977783204,
- "p90": 0.4276643966674805,
- "p95": 0.5542891685485838,
- "p99": 0.6555889860534667,
+ "total": 0.8035778198242188,
+ "mean": 0.16071556396484377,
+ "stdev": 0.2263992327397222,
+ "p50": 0.04726777648925781,
+ "p90": 0.3874406570434571,
+ "p95": 0.5004769508361815,
+ "p99": 0.5909059858703613,
  "values": [
- 0.6809139404296874,
- 0.04779008102416992,
- 0.046750720977783204,
- 0.04646297454833984,
- 0.04623257446289063
+ 0.6135132446289062,
+ 0.048331775665283204,
+ 0.04721561431884766,
+ 0.04726777648925781,
+ 0.047249408721923826
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 57.59371446722629
+ "value": 62.22172733804102
  },
  "energy": null,
  "efficiency": null
@@ -141,7 +141,7 @@
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 1063.75168,
+ "max_ram": 1080.045568,
  "max_global_vram": 3169.32096,
  "max_process_vram": 0.0,
  "max_reserved": 2520.776704,
@@ -150,21 +150,21 @@
  "latency": {
  "unit": "s",
  "count": 2,
- "total": 0.7287040214538574,
- "mean": 0.3643520107269287,
- "stdev": 0.31656192970275876,
- "p50": 0.3643520107269287,
- "p90": 0.6176015544891357,
- "p95": 0.6492577474594116,
- "p99": 0.6745827018356323,
+ "total": 0.6618450202941895,
+ "mean": 0.33092251014709473,
+ "stdev": 0.2825907344818115,
+ "p50": 0.33092251014709473,
+ "p90": 0.556995097732544,
+ "p95": 0.5852541711807251,
+ "p99": 0.60786142993927,
  "values": [
- 0.6809139404296874,
- 0.04779008102416992
+ 0.6135132446289062,
+ 0.048331775665283204
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 10.978394196369303
+ "value": 12.087421911014768
  },
  "energy": null,
  "efficiency": null
@@ -172,7 +172,7 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 1063.75168,
+ "max_ram": 1080.045568,
  "max_global_vram": 3169.32096,
  "max_process_vram": 0.0,
  "max_reserved": 2520.776704,
@@ -181,22 +181,22 @@
  "latency": {
  "unit": "s",
  "count": 3,
- "total": 0.13944626998901366,
- "mean": 0.046482089996337884,
- "stdev": 0.00021196383809187117,
- "p50": 0.04646297454833984,
- "p90": 0.04669317169189453,
- "p95": 0.046721946334838865,
- "p99": 0.046744966049194335,
+ "total": 0.1417327995300293,
+ "mean": 0.047244266510009764,
+ "stdev": 2.1603313989621717e-05,
+ "p50": 0.047249408721923826,
+ "p90": 0.047264102935791016,
+ "p95": 0.047265939712524414,
+ "p99": 0.047267409133911135,
  "values": [
- 0.046750720977783204,
- 0.04646297454833984,
- 0.04623257446289063
+ 0.04721561431884766,
+ 0.04726777648925781,
+ 0.047249408721923826
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 129.0819754549056
+ "value": 126.99953757835915
  },
  "energy": null,
  "efficiency": null