rojagtap commited on
Commit
1258a21
1 Parent(s): 129e6cb

Upload clean.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. clean.py +105 -0
clean.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ {
3
+ "document": "",
4
+ "question": "",
5
+ "long_answer_candidates": ["", "", ""],
6
+ "long_answer_candidate_index": 0,
7
+ "short_answers": ["", "", ""]
8
+ }
9
+ """
10
+
11
+
12
import json
import os
import sys

import jsonlines
from datasets import load_dataset
from huggingface_hub import HfApi
16
+
17
+
18
def clean(raw, path):
    """Flatten streamed Natural Questions records and append them to a JSONL file.

    Each element of ``raw`` is expected to follow the ``natural_questions``
    schema (nested ``document.tokens``, ``long_answer_candidates``,
    ``annotations``).  For every record one JSON line is written containing:
    ``id``, ``document`` (HTML tokens stripped), the question text, each
    long-answer candidate rendered as a plain string, the annotated
    candidate index, and the short-answer texts ("" where absent).

    Records that raise (malformed or partial examples) are skipped with a
    printed message rather than aborting the whole pass.  Output is opened
    in append mode and flushed in batches of 1000 to bound memory while
    streaming.
    """
    with open(path, "a") as fp:
        count = 0
        batch = []
        for data in raw:
            try:
                tokens = data["document"]["tokens"]
                startmax = max(tokens["start_byte"])
                endmax = max(tokens["end_byte"])

                # start2token: token start byte -> {token, is_html}
                # end2start:   token end byte   -> that token's start byte
                # Together these let a candidate's byte range be resolved
                # back to its token sequence.
                start2token = [-1] * (startmax + 1)
                end2start = [-1] * (endmax + 1)

                doc_parts = []
                for i in range(len(tokens["token"])):
                    start2token[tokens["start_byte"][i]] = {
                        "token": tokens["token"][i],
                        "is_html": tokens["is_html"][i],
                    }
                    end2start[tokens["end_byte"][i]] = tokens["start_byte"][i]
                    if not tokens["is_html"][i]:
                        doc_parts.append(tokens["token"][i])
                # Token-plus-space concatenation (trailing space kept for
                # compatibility with the original output format).
                document = "".join(part + " " for part in doc_parts)

                cand_starts = data["long_answer_candidates"]["start_byte"]
                cand_ends = data["long_answer_candidates"]["end_byte"]
                candidates = []
                for i in range(len(cand_starts)):
                    # BUGFIX: the original iterated range(start, end2start[end]),
                    # which stops just BEFORE the final token's start byte and
                    # therefore dropped the last token of every candidate.
                    # `+ 1` makes the final token inclusive.
                    last_start = end2start[cand_ends[i]]
                    candidates.append(" ".join(
                        start2token[j]["token"]
                        for j in range(cand_starts[i], last_start + 1)
                        if start2token[j] != -1 and not start2token[j]["is_html"]
                    ))

                short_answers = [
                    ans["text"][0] if ans["text"] else ""
                    for ans in data["annotations"]["short_answers"]
                ]

                batch.append({
                    "id": data["id"],
                    "document": document,
                    "question": data["question"]["text"],
                    "long_answer_candidates": candidates,
                    "long_answer_candidate_index": data["annotations"]["long_answer"][0]["candidate_index"],
                    "short_answers": short_answers,
                })
            except Exception as ex:
                # Best-effort: skip malformed records but keep streaming.
                print("Exception: " + str(ex))

            # Flush every 1000 records (stdlib json, one record per line,
            # replaces the jsonlines dependency with identical JSONL output).
            if (count + 1) % 1000 == 0:
                for record in batch:
                    fp.write(json.dumps(record) + "\n")
                batch = []

            print("Done: " + str(count), end="\r")
            count += 1

        # Flush the final partial batch.
        for record in batch:
            fp.write(json.dumps(record) + "\n")
74
+
75
+
76
+
77
+ if __name__ == "__main__":
78
+ if len(sys.argv) < 1:
79
+ raise AttributeError("Missing required argument: repository id")
80
+
81
+ repo = sys.argv[1]
82
+
83
+ api = HfApi()
84
+
85
+ train = load_dataset("natural_questions", split="train", streaming=True)
86
+ train_path = "data/train.jsonl"
87
+ clean(train, train_path)
88
+
89
+ api.upload_file(
90
+ path_or_fileobj=train_path,
91
+ path_in_repo="train.jsonl",
92
+ repo_id=repo,
93
+ repo_type="dataset",
94
+ )
95
+
96
+ val = load_dataset("natural_questions", split="validation", streaming=True)
97
+ val_path = "data/validation.jsonl"
98
+ clean(val, val_path)
99
+
100
+ api.upload_file(
101
+ path_or_fileobj=val_path,
102
+ path_in_repo="validation.jsonl",
103
+ repo_id=repo,
104
+ repo_type="dataset",
105
+ )