# 2021-punctuation-restoration.py
import os
import datasets
import pandas as pd
_CITATION = """"""
_DESCRIPTION = """\
This dataset is designed to be used in training models
that restore punctuation marks from the output of
Automatic Speech Recognition system for Polish language.
"""
_HOMEPAGE = "https://github.com/poleval/2021-punctuation-restoration"
_URL = "https://raw.githubusercontent.com/poleval/2021-punctuation-restoration/main"
_PATHS = {
"train": [os.path.join(_URL, "train", "in.tsv"), os.path.join(_URL, "train", "expected.tsv")],
"test-A": [os.path.join(_URL, "test-A", "in.tsv"), os.path.join(_URL, "test-A", "expected.tsv")],
}
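# All remote files referenced above; the builder below downloads them per split via _PATHS.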
_TO_DOWNLOAD = _PATHS["train"] + _PATHS["test-A"]
class PunctuationDatasetConfig(datasets.BuilderConfig):
"""BuilderConfig for AfrikaansNerCorpus"""
def __init__(self, **kwargs):
"""BuilderConfig for PunctuationDataset.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(PunctuationDatasetConfig, self).__init__(**kwargs)
class PunctuationDataset(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
PunctuationDatasetConfig(
name="punctuation_dataset",
version=datasets.Version("1.0.0"),
description="PunctuationDataset dataset",
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"text_in": datasets.Value("string"),
"text_out": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
'B-.',
'B-,',
'B--',
'B-!',
'B-?',
'B-:',
'B-;',
'O',
]
)
)
}),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
        data_paths = {key: dl_manager.download(urls) for key, urls in _PATHS.items()}
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"filepaths": data_paths["train"]}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"filepaths": data_paths["test-A"]}
),
]
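    # Illustrative record shape (made-up sentence, not drawn from the corpus):
    #   text_in : "ala ma kota a kot ma ale"
    #   text_out: "Ala ma kota, a kot ma Ale."
    #   tokens  : ["ala", "ma", "kota", "a", "kot", "ma", "ale"]
    #   tags    : ["O", "O", "B-,", "O", "O", "O", "B-."]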
def _generate_examples(self, filepaths):
        # in.tsv carries the unpunctuated input text; expected.tsv carries the punctuated reference.
        in_df = pd.read_csv(filepaths[0], sep='\t', header=None)
        out_df = pd.read_csv(filepaths[1], sep='\t', header=None)
        for key, ((_, row_in), (_, row_out)) in enumerate(zip(in_df.iterrows(), out_df.iterrows()), 1):
            # The input text sits in the second column of in.tsv; expected.tsv holds the text in its first column.
            text_in = PunctuationDataset._clean_text(row_in[1])
            text_out = PunctuationDataset._clean_text(row_out[0])
tokens = []
tags = []
            for token_in, token_out in zip(text_in.split(), text_out.split()):
                # Each output token is the input token with, at most, a trailing punctuation mark
                # (and possibly restored capitalization).
                assert token_in.lower() in token_out.lower()
                tokens.append(token_in)
                if token_in.lower() == token_out.lower():
                    # No punctuation follows this token.
                    tags.append('O')
                else:
                    # Label the token with the punctuation mark that should follow it, e.g. 'B-,' or 'B-.'.
                    tags.append(f'B-{token_out[-1]}')
yield key, {
"text_in": text_in,
"text_out": text_out,
"tokens": tokens,
"tags": tags
}
@staticmethod
def _clean_text(text: str, lower: bool = False) -> str:
if lower:
text = text.lower()
        # Drop punctuation that appears detached from the preceding token, then collapse double spaces.
        text = text.replace(' -', '')
        text = text.replace(' .', '')
        text = text.replace(' ,', '')
        text = text.replace('  ', ' ')
text = text.strip()
return text
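# Minimal usage sketch, assuming this script is saved locally as
# "2021-punctuation-restoration.py" and a `datasets` version that still
# accepts local loading scripts:
#
#   from datasets import load_dataset
#   ds = load_dataset("2021-punctuation-restoration.py")
#   sample = ds["train"][0]
#   print(sample["tokens"], sample["tags"])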