""" |
|
{ |
|
"document": "", |
|
"question": "", |
|
"long_answer_candidates": ["", "", ""], |
|
"long_answer_candidate_index": 0, |
|
"short_answers": ["", "", ""] |
|
} |
|
""" |
|
|
|
|
|
import os
import sys

import jsonlines
from datasets import load_dataset
from huggingface_hub import HfApi


def clean(raw, path):
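    """Converts streamed Natural Questions records into flat JSON records and appends them to the JSONL file at path."""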
    fp = open(path, "a")
    writer = jsonlines.Writer(fp)

    count = 0
    dataset = []
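
    # Stream over the raw examples; a record that raises is logged and skipped rather than aborting the run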
    for data in raw:
        try:
            document = ""
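
            # Map byte offsets to tokens so long answer candidate spans (given as byte ranges) can be decoded to text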
            startmax, endmax = max(data["document"]["tokens"]["start_byte"]), max(data["document"]["tokens"]["end_byte"])
            start2token, end2start = [-1] * (startmax + 1), [-1] * (endmax + 1)

            tokens = data["document"]["tokens"]
            for i in range(len(tokens["token"])):
                start2token[tokens["start_byte"][i]] = {
                    "token": tokens["token"][i],
                    "is_html": tokens["is_html"][i]
                }

                end2start[tokens["end_byte"][i]] = tokens["start_byte"][i]

                if not tokens["is_html"][i]:
                    document += tokens["token"][i] + " "
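
            # Rebuild each long answer candidate as plain text from its byte range, skipping HTML tokens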
            candidates = []
            for i in range(len(data["long_answer_candidates"]["start_byte"])):
                candidates.append(" ".join(start2token[j]["token"] for j in range(data["long_answer_candidates"]["start_byte"][i], end2start[data["long_answer_candidates"]["end_byte"][i]]) if (start2token[j] != -1) and not start2token[j]["is_html"]))
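
            # Keep the first short answer text per annotation, or an empty string when none is given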
            short_answers = list(map(lambda x: x["text"][0] if x["text"] else "", data["annotations"]["short_answers"]))

            dataset.append({
                "id": data["id"],
                "document": document,
                "question": data["question"]["text"],
                "long_answer_candidates": candidates,
                "long_answer_candidate_index": data["annotations"]["long_answer"][0]["candidate_index"],
                "short_answers": short_answers
            })
        except Exception as ex:
            print("Exception: " + str(ex))
        if (count + 1) % 1000 == 0:
            writer.write_all(dataset)
            dataset = []

print("Done: " + str(count), end="\r") |
|
count += 1 |
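
    # Write any records left in the buffer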
    if dataset:
        writer.write_all(dataset)

    writer.close()
    fp.close()


if __name__ == "__main__":
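    # Expects the target Hugging Face dataset repository id as the first command-line argument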
    if len(sys.argv) < 2:
        raise AttributeError("Missing required argument: repository id")

    repo = sys.argv[1]

    api = HfApi()

    # Make sure the local output directory exists before writing
    os.makedirs("data", exist_ok=True)
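
    # Clean the streamed train split locally, then upload the resulting JSONL file to the dataset repo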
    train = load_dataset("natural_questions", split="train", streaming=True)
    train_path = "data/train.jsonl"
    clean(train, train_path)

    api.upload_file(
        path_or_fileobj=train_path,
        path_in_repo="raw/train.jsonl",
        repo_id=repo,
        repo_type="dataset",
    )
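
    # Repeat for the validation split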
    val = load_dataset("natural_questions", split="validation", streaming=True)
    val_path = "data/validation.jsonl"
    clean(val, val_path)

    api.upload_file(
        path_or_fileobj=val_path,
        path_in_repo="raw/validation.jsonl",
        repo_id=repo,
        repo_type="dataset",
    )