Muennighoff committed
Commit 65062c0
1 Parent(s): 72a7d56

Create inference_c4.py

Files changed (1):
  1. inference_c4.py +155 -0
inference_c4.py ADDED
@@ -0,0 +1,155 @@
+ # !pip install -q transformers datasets sentencepiece
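+ # Example invocation (illustrative): python inference_c4.py --start 0 --end 0 --batch_size 16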
+ import argparse
+ import gc
+ import json
+ import os
+
+ import datasets
+ import pandas as pd
+ import torch
+ from tqdm import tqdm
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
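+ # The English C4 training split on allenai/c4 is sharded into 1024 files.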
+ TOTAL_NUM_FILES_C4_TRAIN = 1024
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument(
+         "--start",
+         type=int,
+         required=True,
+         help="Starting file number to download. Valid values: 0 - 1023",
+     )
+     parser.add_argument(
+         "--end",
+         type=int,
+         required=True,
+         help="Ending file number to download. Valid values: 0 - 1023",
+     )
+     parser.add_argument("--batch_size", type=int, default=16, help="Batch size")
+     parser.add_argument(
+         "--model_name",
+         type=str,
+         default="taskydata/deberta-v3-base_10xp3nirstbbflanseuni_10xc4",
+         help="Model name",
+     )
+     parser.add_argument(
+         "--local_cache_location",
+         type=str,
+         default="c4_download",
+         help="Local cache location from which the dataset will be loaded",
+     )
+     # Use BooleanOptionalAction for the boolean flags: argparse's type=bool treats
+     # any non-empty string (even "False") as True.
+     parser.add_argument(
+         "--use_local_cache_location",
+         action=argparse.BooleanOptionalAction,
+         default=True,
+         help="Load the dataset from the local cache location.",
+     )
+     parser.add_argument(
+         "--clear_dataset_cache",
+         action=argparse.BooleanOptionalAction,
+         default=False,
+         help="Delete the dataset files from the cache after inference.",
+     )
+     parser.add_argument(
+         "--release_memory",
+         action=argparse.BooleanOptionalAction,
+         default=True,
+         help="Release the memory of used variables after each file.",
+     )
+
+     args = parser.parse_args()
+     return args
+
+
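+ # Yield consecutive slices of `l` with at most `n` elements each.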
+ def chunks(l, n):
+     for i in range(0, len(l), n):
+         yield l[i : i + n]
+
+
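+ # Tokenize the texts batch by batch; returns the token tensors plus the raw text batches.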
+ def batch_tokenize(data, batch_size):
+     batches = list(chunks(data, batch_size))
+     tokenized_batches = []
+     for batch in batches:
+         # Pad/truncate every example to the model's maximum length (512 for DeBERTa).
+         tensor = tokenizer(
+             batch,
+             return_tensors="pt",
+             padding="max_length",
+             truncation=True,
+             max_length=512,
+         )
+         tokenized_batches.append(tensor)
+     return tokenized_batches, batches
+
+
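+ # Run the classifier over every batch without gradients and collect per-example logits.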
+ def batch_inference(data, batch_size=16):
+     preds = []
+     tokenized_batches, batches = batch_tokenize(data, batch_size)
+     for i in tqdm(range(len(batches))):
+         with torch.no_grad():
+             logits = model(**tokenized_batches[i].to(device)).logits.cpu()
+         preds.extend(logits)
+     return preds
+
+
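+ # Entry point: classify the requested C4 shards and write per-shard jsonl outputs.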
+ if __name__ == "__main__":
+     args = parse_args()
+
+     tokenizer = AutoTokenizer.from_pretrained(args.model_name)
+     model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
+     device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+     model.to(device)
+     model.eval()  # disable dropout so predictions are deterministic
+
+     # Iterate over the requested range of C4 shard files (--end is treated as inclusive here).
+     for global_id in range(args.start, args.end + 1):
+         if args.use_local_cache_location:
+             # Assumes the shard was downloaded to the local cache under this name.
+             file_name = f"c4-train.{global_id}.json.gz"
+             data_files = {"train": f"{args.local_cache_location}/{file_name}"}
+             c4 = datasets.load_dataset("json", data_files=data_files, split="train")
+         else:
+             # Shards in the allenai/c4 repo are named like en/c4-train.00000-of-01024.json.gz.
+             file_name = f"en/c4-train.{global_id:05d}-of-{TOTAL_NUM_FILES_C4_TRAIN:05d}.json.gz"
+             data_files = {"train": file_name}
+             c4 = datasets.load_dataset(
+                 "allenai/c4", data_files=data_files, split="train"
+             )
+         df = pd.DataFrame(c4, index=None)
+         texts = df["text"].to_list()
+         preds = batch_inference(texts, batch_size=args.batch_size)
+
+         assert len(preds) == len(texts)
+
+         # Write two jsonl files:
+         # 1) Probas for all of C4
+         # 2) Probas + texts for samples predicted as tasky
+         # (Per-shard output file names below are illustrative.)
+         c4taskyprobas_path = f"c4_train_{global_id}_probas.jsonl"
+         c4tasky_path = f"c4_train_{global_id}_tasky.jsonl"
+         df["timestamp"] = df["timestamp"].astype(str)
+         with open(c4taskyprobas_path, "w") as f, open(c4tasky_path, "w") as g:
+             for i in range(len(preds)):
+                 predicted_class_id = preds[i].argmax().item()
+                 pred = model.config.id2label[predicted_class_id]
+                 tasky_proba = torch.softmax(preds[i], dim=-1)[-1].item()
+                 f.write(json.dumps({"proba": tasky_proba}) + "\n")
+                 # If it's tasky, save!
+                 if int(predicted_class_id) == 1:
+                     g.write(
+                         json.dumps(
+                             {
+                                 "proba": tasky_proba,
+                                 "text": texts[i],
+                                 "timestamp": df["timestamp"][i],
+                                 "url": df["url"][i],
+                             }
+                         )
+                         + "\n"
+                     )
+         # release memory
+         if args.release_memory:
+             del preds
+             del texts
+             del df
+             gc.collect()
+
+         # Delete the processed dataset file from the cache
+         if args.clear_dataset_cache:
+             os.remove(f"{args.local_cache_location}/{file_name}")