Upload cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4/benchmark.json with huggingface_hub
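The commit message indicates the file was pushed with the huggingface_hub client. A minimal sketch of such an upload is shown below; the repo_id and repo_type are placeholders, since the target repository is not named on this page:

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4/benchmark.json",
    path_in_repo="cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4/benchmark.json",
    repo_id="my-org/benchmark-results",  # placeholder: the actual repository is not shown on this page
    repo_type="dataset",                 # assumption: benchmark results are commonly stored in a dataset repo
    commit_message="Upload cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4/benchmark.json with huggingface_hub",
)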
cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4/benchmark.json
CHANGED
@@ -3,7 +3,7 @@
     "name": "cuda_inference_diffusers_stable-diffusion_CompVis/stable-diffusion-v1-4",
     "backend": {
         "name": "pytorch",
-        "version": "2.
+        "version": "2.3.0+cu121",
         "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
         "task": "stable-diffusion",
         "model": "CompVis/stable-diffusion-v1-4",
@@ -78,7 +78,7 @@
     "machine": "x86_64",
     "platform": "Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35",
     "processor": "x86_64",
-    "python_version": "3.10.
+    "python_version": "3.10.12",
     "gpu": [
         "NVIDIA A10G"
     ],
@@ -104,41 +104,41 @@
     "call": {
         "memory": {
             "unit": "MB",
-            "max_ram":
-            "max_global_vram":
+            "max_ram": 1316.27008,
+            "max_global_vram": 8173.125632,
             "max_process_vram": 0.0,
-            "max_reserved":
-            "max_allocated":
+            "max_reserved": 7518.28992,
+            "max_allocated": 6527.82592
         },
         "latency": {
             "unit": "s",
             "count": 2,
-            "total": 1.
-            "mean": 0.
-            "stdev": 0.
-            "p50": 0.
-            "p90": 0.
-            "p95": 0.
-            "p99": 0.
+            "total": 1.2613369140625,
+            "mean": 0.63066845703125,
+            "stdev": 0.0005619506835937149,
+            "p50": 0.63066845703125,
+            "p90": 0.631118017578125,
+            "p95": 0.6311742126464843,
+            "p99": 0.6312191687011719,
             "values": [
-                0.
-                0.
+                0.6312304077148437,
+                0.6301065063476563
             ]
         },
         "throughput": {
             "unit": "images/s",
-            "value": 1.
+            "value": 1.585619177320691
        },
         "energy": {
             "unit": "kWh",
-            "cpu": 7.
-            "ram": 4.
-            "gpu": 4.
-            "total": 5.
+            "cpu": 7.432667165994645e-06,
+            "ram": 4.0645859895448666e-06,
+            "gpu": 4.004933759500003e-05,
+            "total": 5.154659075053954e-05
         },
         "efficiency": {
             "unit": "images/kWh",
-            "value":
+            "value": 19399.925105416074
         }
     }
 }
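As a rough cross-check of the values added above (not part of the file itself): the reported throughput matches count / total latency, and the reported efficiency matches 1 / total energy, which would suggest the energy totals are normalized per generated image; that last reading is an assumption, not something stated in the file.

count = 2                                 # "latency.count"
total_latency_s = 1.2613369140625         # "latency.total"
total_energy_kwh = 5.154659075053954e-05  # "energy.total"

print(count / total_latency_s)  # ~1.585619177320691 images/s, matches "throughput.value"
print(1 / total_energy_kwh)     # ~19399.925105416074, matches "efficiency.value" (images/kWh),
                                # assuming the energy totals are per-image figures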