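"""Aggregate the most recent energy benchmark results per model and push them to the Hub.

Downloads a snapshot of the `meg/calculate_carbon_runs` dataset repo, walks the
runs/{task}/{hardware}/{org}/{model}/{timestamp} directory tree, reads the newest
`benchmark_report.json` for each model (prefill, decode, and preprocess energy and
efficiency), and uploads the collected rows as a dataset to `meg/HUGS_energy`.
"""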
import glob
import json
import os
from datetime import datetime

from datasets import Dataset
from huggingface_hub import HfApi

# Write-access token used to push the aggregated dataset to the Hub.
TOKEN = os.environ.get("HF_WRITE_TOKEN")
API = HfApi(token=TOKEN)
# Source repo holding the raw benchmark runs, and destination repo for the aggregated results.
REPO_ID = "meg/calculate_carbon_runs"
UPLOAD_REPO_ID = "meg/HUGS_energy"

# Download a local snapshot of the runs dataset, then collect one result row per model.
output_directory = API.snapshot_download(repo_id=REPO_ID, repo_type="dataset")
print(output_directory)
dataset_results = []
for task in ['text_generation']:
    # Results are laid out as runs/{task}/{hardware}/{org}/{model}/{timestamp}.
    hardware_dirs = glob.glob(f"{output_directory}/runs/{task}/*")
    print(hardware_dirs)
    for hardware_dir in hardware_dirs:
        hardware = hardware_dir.split("/")[-1]
        org_dirs = glob.glob(f"{hardware_dir}/*")
        print(org_dirs)
        for org_dir in org_dirs:
            org = org_dir.split("/")[-1]
            model_dirs = glob.glob(f"{org_dir}/*")
            print(model_dirs)
            for model_dir in model_dirs:
                model = model_dir.split("/")[-1]
                model_runs = glob.glob(f"{model_dir}/*")
                dates = [run_dir.split("/")[-1] for run_dir in model_runs]
                try:
                    # Sort the timestamped run directories chronologically and keep the most recent one.
                    sorted_dates = sorted(
                        datetime.strptime(date, '%Y-%m-%d-%H-%M-%S') for date in dates
                    )
                    last_date = sorted_dates[-1].strftime('%Y-%m-%d-%H-%M-%S')
                    most_recent_run = f"{model_dir}/{last_date}"
                    print(most_recent_run)
                    try:
                        with open(f"{most_recent_run}/benchmark_report.json") as f:
                            benchmark_report = json.load(f)
                        print(benchmark_report)
                        # Pull the energy and efficiency measurements for each benchmark phase.
                        prefill_data = benchmark_report['prefill']
                        decode_data = benchmark_report['decode']
                        preprocess_data = benchmark_report['preprocess']
                        dataset_results.append({
                            'task': task, 'org': org, 'model': model, 'hardware': hardware,
                            'date': last_date,
                            'prefill': {'energy': prefill_data['energy'],
                                        'efficiency': prefill_data['efficiency']},
                            'decode': {'energy': decode_data['energy'],
                                       'efficiency': decode_data['efficiency']},
                            'preprocess': {'energy': preprocess_data['energy'],
                                           'efficiency': preprocess_data['efficiency']},
                        })
                    except FileNotFoundError:
                        # No report was produced for this run; surface the error log instead.
                        with open(f"{most_recent_run}/error.log") as f:
                            print(f.read())
                except ValueError:
                    # Not a directory with a timestamp.
                    continue

print("*****")
print(dataset_results)
hub_dataset_results = Dataset.from_list(dataset_results)
print(hub_dataset_results)
hub_dataset_results.push_to_hub(UPLOAD_REPO_ID, token=TOKEN)