# !pip install -q transformers datasets sentencepiece
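"""Score commit messages with a binary "tasky" classifier.

Loads a jsonl file of Python commit messages, runs batched inference with a
sequence-classification model, and writes per-commit tasky probabilities to
python_add/tasky_commits_python_<start>_<end>.jsonl.
"""
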
import argparse
import gc
import json
import os
import shutil
import sys

import datasets
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification, AutoTokenizer


def parse_args():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--start",
        type=int,
        required=True,
        help="Start index (inclusive) into the commit-message dataset.",
    )
    parser.add_argument(
        "--end",
        type=int,
        required=True,
        help="End index (exclusive) into the commit-message dataset.",
    )
    parser.add_argument("--batch_size", type=int, default=8, help="Batch size")
    parser.add_argument(
        "--model_name",
        type=str,
        default="taskydata/deberta-v3-base_10xp3nirstbbflanseuni_10xc4",
        help="Model name",
    )
    parser.add_argument(
        "--local_cache_location",
        type=str,
        default="c4_download",
        help="Local cache directory from which the dataset is loaded.",
    )
    # argparse's type=bool treats any non-empty string as True, so the flags
    # below use BooleanOptionalAction (Python 3.9+), which also provides
    # --no-<flag> switches for disabling them.
    parser.add_argument(
        "--use_local_cache_location",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Load the dataset from the local cache location.",
    )
    parser.add_argument(
        "--clear_dataset_cache",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="Delete the cached dataset files after inference.",
    )
    parser.add_argument(
        "--release_memory",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Release model/tokenizer memory after inference.",
    )

    args = parser.parse_args()
    return args

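# Example invocations (sketches; the script filename is illustrative):
#   python tasky_commits.py --start 0 --end 50000
#   python tasky_commits.py --start 0 --end 50000 --batch_size 16 \
#       --no-release_memory --clear_dataset_cache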

def chunks(seq, n):
    # Yield successive n-sized chunks from seq.
    for i in range(0, len(seq), n):
        yield seq[i : i + n]

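# For reference: list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]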

def batch_tokenize(data, batch_size):
    batches = list(chunks(data, batch_size))
    tokenized_batches = []
    for batch in batches:
        # Pad and truncate every example to the model's maximum input length
        # (512 tokens for DeBERTa) so all batches have a uniform shape.
        tensor = tokenizer(
            batch,
            return_tensors="pt",
            padding="max_length",
            truncation=True,
            max_length=512,
        )
        tokenized_batches.append(tensor)
    return tokenized_batches, batches

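# Each element of tokenized_batches is a BatchEncoding whose input_ids and
# attention_mask tensors have shape (batch_size, 512). A quick check, assuming
# the tokenizer has been loaded as in the main block below:
#   tok, raw = batch_tokenize(["fix typo", "add CI workflow"], batch_size=2)
#   tok[0]["input_ids"].shape  # torch.Size([2, 512])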

def batch_inference(data, batch_size=32):
    preds = []
    tokenized_batches, batches = batch_tokenize(data, batch_size)
    for i in tqdm(range(len(batches))):
        with torch.no_grad():
            logits = model(**tokenized_batches[i].to(device)).logits.cpu()
        preds.extend(logits)
    return preds

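# batch_inference returns one logits tensor per input; for a binary classifier
# each has shape (2,). Assuming, as the main block does, that the last label
# is the tasky class:
#   preds = batch_inference(["fix typo in README"], batch_size=1)
#   torch.softmax(preds[0], dim=-1)[-1]  # probability the message is tasky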

if __name__ == "__main__":
    args = parse_args()

    tasky_commits_path = f"tasky_commits_python_{args.start}_{args.end}.jsonl"
    if os.path.exists(f"python_add/{tasky_commits_path}"):
        print("Exists:", tasky_commits_path)
        exit()

    path = "python_add_messages.jsonl"
    ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
    if args.start > len(ds): exit()
    ds = ds[range(args.start, min(args.end, len(ds)))]
    df = pd.DataFrame(ds, index=None)

    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)
    model.eval()

    texts = df["message"].to_list()
    commits = df["commit"].to_list()
    preds = batch_inference(texts, batch_size=args.batch_size)

    assert len(preds) == len(texts)

    # Write one jsonl file with the commit hash, the original message, and the
    # predicted probability that the message is task-like ("tasky").

    with open(tasky_commits_path, "w") as f:
        for i in range(len(preds)):
            # Probability of the last class, which corresponds to the "tasky" label.
            tasky_proba = torch.softmax(preds[i], dim=-1)[-1].item()

            f.write(
                json.dumps(
                    {
                        "commit": commits[i],
                        "message": texts[i],
                        "proba": tasky_proba,
                    }
                )
                + "\n"
            )

    if args.release_memory:
        # Drop references to the big objects and free cached CUDA memory.
        del model, tokenizer, preds
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    if args.clear_dataset_cache:
        # Remove the locally cached dataset files now that inference is done.
        shutil.rmtree(args.local_cache_location, ignore_errors=True)
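
    # Downstream use (a sketch, not part of this script): read the output back
    # and keep commits whose tasky probability clears a threshold. The 0.5
    # cutoff is illustrative, not a project constant.
    #
    #   with open(tasky_commits_path) as f:
    #       rows = [json.loads(line) for line in f]
    #   tasky = [r for r in rows if r["proba"] > 0.5]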