# !pip install -q transformers datasets sentencepiece
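"""Score C4 (en) training shards with a "tasky" sequence classifier.

For each shard in the requested range, the script loads the data, runs batched
inference, and writes two JSONL files: per-document tasky probabilities for the
whole shard, and probability/text/timestamp/url for documents predicted tasky.
"""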
import argparse
import gc
import json
import os

import datasets
import pandas as pd
import torch
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification, AutoTokenizer

TOTAL_NUM_FILES_C4_TRAIN = 1024


def str2bool(v):
    # argparse's `type=bool` treats any non-empty string (including "False")
    # as True, so parse common truthy spellings explicitly instead.
    return str(v).lower() in ("1", "true", "yes", "y")


def parse_args():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--start",
        type=int,
        required=True,
        help="Starting file number to download. Valid values: 0 - 1023",
    )
    parser.add_argument(
        "--end",
        type=int,
        required=True,
        help="Ending file number to download. Valid values: 0 - 1023",
    )
    parser.add_argument("--batch_size", type=int, default=16, help="Batch size")
    parser.add_argument(
        "--model_name",
        type=str,
        default="taskydata/deberta-v3-base_10xp3nirstbbflanseuni_10xc4",
        help="Model name",
    )
    parser.add_argument(
        "--local_cache_location",
        type=str,
        default="c4_download",
        help="local cache location from where the dataset will be loaded",
    )
    parser.add_argument(
        "--use_local_cache_location",
        type=str2bool,
        default=True,
        help="Set True if you want to load the dataset from local cache.",
    )
    parser.add_argument(
        "--clear_dataset_cache",
        type=str2bool,
        default=False,
        help="Set True if you want to delete the dataset files from the cache after inference.",
    )
    parser.add_argument(
        "--release_memory",
        type=str2bool,
        default=True,
        help="Set True if you want to release the memory of used variables.",
    )

    args = parser.parse_args()
    return args


def chunks(l, n):
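    # Yield successive n-sized chunks of the list l.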
    for i in range(0, len(l), n):
        yield l[i : i + n]


def batch_tokenize(data, batch_size):
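    # Tokenize `data` in fixed-size batches with the module-level `tokenizer`
    # created in the __main__ block below.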
    batches = list(chunks(data, batch_size))
    tokenized_batches = []
    for batch in batches:
        # Pad/truncate every example to 512 tokens, the maximum sequence
        # length for DeBERTa-v3.
        tensor = tokenizer(
            batch,
            return_tensors="pt",
            padding="max_length",
            truncation=True,
            max_length=512,
        )
        tokenized_batches.append(tensor)
    return tokenized_batches, batches


def batch_inference(data, batch_size=16):
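    # Score every text with the module-level `model` and return the raw logits
    # (one tensor of class logits per input text).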
    preds = []
    tokenized_batches, batches = batch_tokenize(data, batch_size)
    for i in tqdm(range(len(batches))):
        with torch.no_grad():
            logits = model(**tokenized_batches[i].to(device)).logits.cpu()
        preds.extend(logits)
    return preds


if __name__ == "__main__":
    args = parse_args()

    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)
    model.eval()

    # Iterate over the requested range of C4 training shards. The end index is
    # assumed inclusive, matching the "Valid values: 0 - 1023" help text.
    for file_number in range(args.start, args.end + 1):
        # C4 shards on the Hub are named like c4-train.00000-of-01024.json.gz
        global_id = f"{file_number:05d}-of-{TOTAL_NUM_FILES_C4_TRAIN:05d}"

        if args.use_local_cache_location:
            file_name = f"c4-train.{global_id}.json.gz"
            data_files = {"train": f"{args.local_cache_location}/{file_name}"}
            c4 = datasets.load_dataset("json", data_files=data_files, split="train")
        else:
            file_name = f"en/c4-train.{global_id}.json.gz"
            data_files = {"train": file_name}
            c4 = datasets.load_dataset(
                "allenai/c4", data_files=data_files, split="train"
            )

        df = pd.DataFrame(c4, index=None)
        texts = df["text"].to_list()
        preds = batch_inference(texts, batch_size=args.batch_size)

        assert len(preds) == len(texts)

        # Output paths for this shard (illustrative names; adjust as needed).
        c4taskyprobas_path = f"c4-train.{global_id}.probas.jsonl"
        c4tasky_path = f"c4-train.{global_id}.tasky.jsonl"

        # Write two jsonl files:
        # 1) Probas for all of C4
        # 2) Probas + texts for samples predicted as tasky
        df["timestamp"] = df["timestamp"].astype(str)
        with open(c4taskyprobas_path, "w") as f, open(c4tasky_path, "w") as g:
            for i in range(len(preds)):
                predicted_class_id = preds[i].argmax().item()
                # Human-readable label (not used below).
                pred = model.config.id2label[predicted_class_id]
                # Probability of the tasky (last / positive) class.
                tasky_proba = torch.softmax(preds[i], dim=-1)[-1].item()
                f.write(json.dumps({"proba": tasky_proba}) + "\n")
                # If it's tasky, save!
                if int(predicted_class_id) == 1:
                    g.write(
                        json.dumps(
                            {
                                "proba": tasky_proba,
                                "text": texts[i],
                                "timestamp": df["timestamp"][i],
                                "url": df["url"][i],
                            }
                        )
                        + "\n"
                    )

        # Release memory before moving on to the next shard.
        if args.release_memory:
            del preds
            del texts
            del df
            gc.collect()

        # Delete the processed dataset file from the cache.
        if args.clear_dataset_cache:
            os.remove(f"{args.local_cache_location}/{file_name}")