ruczu committed
Commit a034668
1 Parent(s): a1d490f

Add loading script

Files changed (1)
  1. 2021-punctuation-restoration.py +123 -0
2021-punctuation-restoration.py ADDED
@@ -0,0 +1,123 @@
+ import os
+ 
+ import datasets
+ import pandas as pd
+ 
+ 
+ _CITATION = """"""
+ 
+ _DESCRIPTION = """\
+ This dataset is designed for training models that restore
+ punctuation marks in the output of an Automatic Speech Recognition
+ system for the Polish language.
+ """
+ 
+ _HOMEPAGE = "https://github.com/poleval/2021-punctuation-restoration"
+ 
+ _URL = "https://huggingface.co/datasets/lruczu/2021-punctuation-restoration/resolve/main"
+ 
+ _PATHS = {
+     "train": os.path.join(_URL, "train"),
+     "test-A": os.path.join(_URL, "test-A"),
+ }
+ 
+ 
+ class PunctuationDatasetConfig(datasets.BuilderConfig):
+     """BuilderConfig for PunctuationDataset."""
+ 
+     def __init__(self, **kwargs):
+         """BuilderConfig for PunctuationDataset.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(PunctuationDatasetConfig, self).__init__(**kwargs)
+ 
+ 
+ class PunctuationDataset(datasets.GeneratorBasedBuilder):
+     """Punctuation restoration dataset for Polish ASR output (PolEval 2021)."""
+ 
+     VERSION = datasets.Version("1.0.0")
+ 
+     BUILDER_CONFIGS = [
+         PunctuationDatasetConfig(
+             name="punctuation_dataset",
+             version=datasets.Version("1.0.0"),
+             description="PunctuationDataset dataset",
+         ),
+     ]
+ 
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text_in": datasets.Value("string"),
+                     "text_out": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 'B-.',
+                                 'B-,',
+                                 'B--',
+                                 'B-!',
+                                 'B-?',
+                                 'B-:',
+                                 'B-;',
+                                 'O',
+                             ]
+                         )
+                     )
+                 }),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+ 
+     def _split_generators(self, dl_manager):
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": _PATHS["train"]}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"filepath": _PATHS["test-A"]}
+             ),
+         ]
+ 
+     def _generate_examples(self, filepath):
+         in_df = pd.read_csv(os.path.join(filepath, "in.tsv"), sep='\t', header=None)
+         out_df = pd.read_csv(os.path.join(filepath, 'expected.tsv'), sep='\t', header=None)
+ 
+         for key, ((_, row_in), (_, row_out)) in enumerate(zip(in_df.iterrows(), out_df.iterrows()), 1):
+ 
+             text_in = PunctuationDataset._clean_text(row_in[1])
+             text_out = PunctuationDataset._clean_text(row_out[0])
+ 
+             tokens = []
+             tags = []
+             for token_in, token_out in zip(text_in.split(), text_out.split()):
+                 assert token_in.lower() in token_out.lower()
+ 
+                 tokens.append(token_in)
+                 if token_in.lower() == token_out.lower():
+                     tags.append('O')
+                 else:
+                     tags.append(f'B-{token_out[-1]}')
+ 
+             yield key, {
+                 "text_in": text_in,
+                 "text_out": text_out,
+                 "tokens": tokens,
+                 "tags": tags
+             }
+ 
+     @staticmethod
+     def _clean_text(text: str, lower: bool = False) -> str:
+         if lower:
+             text = text.lower()
+         text = text.replace(' -', '')
+         text = text.replace(' .', '')
+         text = text.replace(' ,', '')
+         text = text.replace('  ', ' ')  # collapse double spaces
+         text = text.strip()
+         return text
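
For reference, a minimal usage sketch (not part of the commit): it assumes the script is published under the repo id lruczu/2021-punctuation-restoration referenced by _URL, and a `datasets` version that still executes loading scripts (recent releases require trust_remote_code=True; drop that argument on older ones). Because "tags" is declared as a Sequence of ClassLabel, examples store integer ids that can be mapped back to the punctuation labels via the features.

import datasets

# Hypothetical load call for the script above.
ds = datasets.load_dataset("lruczu/2021-punctuation-restoration", trust_remote_code=True)

# Recover the label names declared in _info().
tag_names = ds["train"].features["tags"].feature.names
print(tag_names)  # ['B-.', 'B-,', 'B--', 'B-!', 'B-?', 'B-:', 'B-;', 'O']

# Inspect one aligned token/tag pair per word.
example = ds["train"][0]
for token, tag_id in zip(example["tokens"], example["tags"]):
    print(token, tag_names[tag_id])

Note that _split_generators hands the remote split URLs straight to _generate_examples rather than downloading through dl_manager, so in.tsv and expected.tsv are fetched over HTTP by pandas at generation time.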