Upload folder using huggingface_hub

#2 by begumcig - opened
Files changed (3)
  1. base_results.json +19 -0
  2. plots.png +0 -0
  3. smashed_results.json +19 -0
base_results.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "current_gpu_type": "Tesla T4",
+ "current_gpu_total_memory": 15095.0625,
+ "perplexity": 3.4586403369903564,
+ "memory_inference_first": 808.0,
+ "memory_inference": 808.0,
+ "token_generation_latency_sync": 36.94038696289063,
+ "token_generation_latency_async": 37.75534201413393,
+ "token_generation_throughput_sync": 0.027070642248674184,
+ "token_generation_throughput_async": 0.026486318138123188,
+ "token_generation_CO2_emissions": 2.9548104595618817e-05,
+ "token_generation_energy_consumption": 0.0024448525932904515,
+ "inference_latency_sync": 259.3957290649414,
+ "inference_latency_async": 82.87992477416992,
+ "inference_throughput_sync": 0.0038551135888195117,
+ "inference_throughput_async": 0.012065648016993093,
+ "inference_CO2_emissions": 3.8677334298918255e-05,
+ "inference_energy_consumption": 0.00013983032998305092
+ }
plots.png ADDED
smashed_results.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "current_gpu_type": "Tesla T4",
+ "current_gpu_total_memory": 15095.0625,
+ "perplexity": 343688032.0,
+ "memory_inference_first": 344.0,
+ "memory_inference": 344.0,
+ "token_generation_latency_sync": 94.73909301757813,
+ "token_generation_latency_async": 94.9408158659935,
+ "token_generation_throughput_sync": 0.010555304765419883,
+ "token_generation_throughput_async": 0.010532877676251214,
+ "token_generation_CO2_emissions": 2.0721120007850446e-05,
+ "token_generation_energy_consumption": 0.004636361917186246,
+ "inference_latency_sync": 147.19097137451172,
+ "inference_latency_async": 94.34385299682617,
+ "inference_throughput_sync": 0.006793894969655487,
+ "inference_throughput_async": 0.010599524698589966,
+ "inference_CO2_emissions": 2.0695969686373567e-05,
+ "inference_energy_consumption": 7.278868671603012e-05
+ }
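
As a reading aid for the two result files above, here is a minimal sketch that loads both JSON files and prints the smashed/base ratio for a few headline metrics. It assumes the files have been downloaded to the current working directory; the metric selection is illustrative only and is not part of this PR.

```python
import json

# Load the two benchmark result files added in this PR
# (assumed to be in the current working directory).
with open("base_results.json") as f:
    base = json.load(f)
with open("smashed_results.json") as f:
    smashed = json.load(f)

# Compare a few headline metrics side by side.
# For latency, memory, and perplexity, a ratio < 1 means the smashed
# model improved; for throughput, a ratio > 1 means it improved.
metrics = [
    "memory_inference",
    "token_generation_latency_sync",
    "token_generation_throughput_sync",
    "inference_latency_sync",
    "inference_throughput_sync",
    "perplexity",
]
for key in metrics:
    ratio = smashed[key] / base[key]
    print(f"{key}: base={base[key]:.6g}  smashed={smashed[key]:.6g}  smashed/base={ratio:.3g}")
```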