from datasets import Dataset, load_dataset
ds: Dataset = load_dataset("DDSC/partial-danish-gigaword-no-twitter") # type: ignore
ds = ds["train"]
# filter to only include spontaneous speech
ds = ds.filter(lambda x: x["source"] == "spont", num_proc=6)
texts = ds["text"]


def remove_taler(text: str) -> str:
    # strip a leading "Taler ...:" speaker prefix from a transcript line
    if text.startswith("Taler"):
        text = ":".join(text.split(":")[1:])
    return text.strip()


qa_pairs = []
for text in texts:
    lines = text.split("\n")
    lines = [remove_taler(line) for line in lines]

    # a question is a line of more than 7 tokens that ends with "?";
    # its answer is the following line (questions on the last line are skipped)
    questions = [
        (i, line)
        for i, line in enumerate(lines)
        if len(line.split(" ")) > 7 and line.endswith("?") and i + 1 < len(lines)
    ]
    qa_pairs_ = [{"question": lines[i], "answer": lines[i + 1]} for i, _ in questions]
    qa_pairs += qa_pairs_


# filter qa pairs
def get_length_of_pair(qa: dict) -> int:
    return len(qa["question"].split(" ")) + len(qa["answer"].split(" "))


def get_min_length_of_pair(qa: dict) -> int:
    return min(len(qa["question"].split(" ")), len(qa["answer"].split(" ")))


# keep pairs with fewer than 20 tokens in total where both the question and
# the answer are at least 5 tokens long
qa_pairs = [
    qa
    for qa in qa_pairs
    if get_length_of_pair(qa) < 20 and get_min_length_of_pair(qa) > 4
]

# create dataset
qa_ds = Dataset.from_list(qa_pairs)

# add readme
qa_ds.info.description = """# Spontaneous speech QA

This dataset contains QA pairs from the spontaneous speech subsection of the Danish Gigaword.
The dataset is created from the [DDSC dataset](DDSC/partial-danish-gigaword-no-twitter) and
filtered to only include QA pairs where the question and answer together are shorter than
20 tokens and each of them is at least 5 tokens long.

To find out more about how the dataset was created, see the accompanying script.
"""
qa_ds.info.license = ds[0]["LICENSE"]  # inherit the license from the source dataset
qa_ds.info.dataset_name = "Spontaneous Speech QA"

# split dataset
qa_ds = qa_ds.train_test_split(test_size=0.2)
# upload dataset
qa_ds.push_to_hub("spontanous-speech-qa")
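
# A minimal sketch (an assumption, not part of the original script) of how the
# pushed dataset could be loaded back for a quick sanity check; "<user>" stands
# for the Hub namespace the dataset was pushed to:
#
#     from datasets import load_dataset
#     qa = load_dataset("<user>/spontanous-speech-qa")
#     print(qa["train"][0])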