Languages:
Russian
Multilinguality:
monolingual
Size Categories:
10K<n<20K
Language Creators:
crowdsourced
Annotations Creators:
crowdsourced
Tags:
spellcheck
russian
License:
apache-2.0
NikitaMartynov committed
Commit 47539e6
1 Parent(s): 52ccd53
Files changed (1)
  1. spellcheck_benchmark.py +217 -0
spellcheck_benchmark.py ADDED
@@ -0,0 +1,217 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""The Russian Spellcheck Benchmark."""

import json
from typing import Dict, List

import datasets


_RUSSIAN_SPELLCHECK_BENCHMARK_DESCRIPTION = """
Russian Spellcheck Benchmark is a new benchmark for spelling correction in the Russian language.
It includes four datasets, each of which consists of pairs of sentences in Russian.
Each pair comprises a sentence that may contain spelling errors and its corresponding correction.
The datasets were gathered from various sources and domains, including social networks, internet blogs,
GitHub commits, medical anamneses, literature, news, reviews and more.
"""

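# An invented example of such a pair (illustration only, not drawn from the data):
#   source:     "сонце светит ярко"   # misspelled "сонце"
#   correction: "солнце светит ярко"
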
_MULTIDOMAIN_GOLD_DESCRIPTION = """
MultidomainGold is a dataset of 3500 sentence pairs
dedicated to the problem of automatic spelling correction in the Russian language.
The dataset is gathered from seven different domains, including news, Russian classic literature,
social media texts, the open web, strategic documents, subtitles and reviews.
It has passed through a two-stage manual labeling process with native speakers as annotators,
instructed to correct spelling violations while preserving the original style of the text.
"""

_GITHUB_TYPO_CORPUS_RU_DESCRIPTION = """
GitHubTypoCorpusRu is a manually labeled part of the GitHub Typo Corpus https://arxiv.org/abs/1911.12893.
The sentences tagged "ru" were extracted from the GitHub Typo Corpus
and passed through manual labeling to ensure the corresponding corrections are right.
"""

_RUSPELLRU_DESCRIPTION = """
RUSpellRU is the first benchmark on the task of automatic spelling correction for the Russian language,
introduced in https://www.dialog-21.ru/media/3427/sorokinaaetal.pdf.
The original sentences are drawn from the social media domain and labeled by
human annotators.
"""

_MEDSPELLCHECK_DESCRIPTION = """
The dataset is taken from the GitHub repository associated with the eponymous project https://github.com/DmitryPogrebnoy/MedSpellChecker.
The original sentences are taken from anonymized medical anamneses and passed through a
two-stage manual labeling pipeline.
"""

_RUSSIAN_SPELLCHECK_BENCHMARK_CITATION = """ # TODO: add citation"""

_MULTIDOMAIN_GOLD_CITATION = """ # TODO: add citation from Dialog"""

_GITHUB_TYPO_CORPUS_RU_CITATION = """
@article{DBLP:journals/corr/abs-1911-12893,
  author     = {Masato Hagiwara and
                Masato Mita},
  title      = {GitHub Typo Corpus: {A} Large-Scale Multilingual Dataset of Misspellings
                and Grammatical Errors},
  journal    = {CoRR},
  volume     = {abs/1911.12893},
  year       = {2019},
  url        = {http://arxiv.org/abs/1911.12893},
  eprinttype = {arXiv},
  eprint     = {1911.12893},
  timestamp  = {Wed, 08 Jan 2020 15:28:22 +0100},
  biburl     = {https://dblp.org/rec/journals/corr/abs-1911-12893.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
"""

_RUSPELLRU_CITATION = """
@inproceedings{Shavrina2016SpellRuevalT,
  title  = {SpellRuEval: The First Competition on Automatic Spelling Correction for Russian},
  author = {Tatiana Shavrina},
  year   = {2016}
}
"""

_LICENSE = "apache-2.0"


class RussianSpellcheckBenchmarkConfig(datasets.BuilderConfig):
    """BuilderConfig for RussianSpellcheckBenchmark."""

    def __init__(
        self,
        data_urls: Dict[str, str],
        features: List[str],
        citation: str,
        **kwargs,
    ):
        """BuilderConfig for RussianSpellcheckBenchmark.

        Args:
            features: *list[string]*, list of the features that will appear in the
                feature dict. Should not include "label".
            data_urls: *dict[string, string]*, mapping from split name to the URL
                or path of the data file for that split.
            citation: *string*, citation for the dataset.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.data_urls = data_urls
        self.features = features
        self.citation = citation


class RussianSpellcheckBenchmark(datasets.GeneratorBasedBuilder):
    """Russian Spellcheck Benchmark."""

    BUILDER_CONFIGS = [
        RussianSpellcheckBenchmarkConfig(
            name="GitHubTypoCorpusRu",
            description=_GITHUB_TYPO_CORPUS_RU_DESCRIPTION,
            data_urls={
                "test": "data/GitHubTypoCorpusRu/test.json",
            },
            features=["source", "correction", "domain"],
            citation=_GITHUB_TYPO_CORPUS_RU_CITATION,
        ),
        RussianSpellcheckBenchmarkConfig(
            name="MedSpellchecker",
            description=_MEDSPELLCHECK_DESCRIPTION,
            data_urls={
                "test": "data/MedSpellchecker/test.json",
            },
            features=["source", "correction", "domain"],
            citation="",
        ),
        RussianSpellcheckBenchmarkConfig(
            name="MultidomainGold",
            description=_MULTIDOMAIN_GOLD_DESCRIPTION,
            data_urls={
                "train": "data/MultidomainGold/train.json",
                "test": "data/MultidomainGold/test.json",
            },
            features=["source", "correction", "domain"],
            citation=_MULTIDOMAIN_GOLD_CITATION,
        ),
        RussianSpellcheckBenchmarkConfig(
            name="RUSpellRU",
            description=_RUSPELLRU_DESCRIPTION,
            data_urls={
                "train": "data/RUSpellRU/train.json",
                "test": "data/RUSpellRU/test.json",
            },
            features=["source", "correction", "domain"],
            citation=_RUSPELLRU_CITATION,
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        features = {
            "source": datasets.Value("string"),
            "correction": datasets.Value("string"),
            "domain": datasets.Value("string"),
        }

        return datasets.DatasetInfo(
            features=datasets.Features(features),
            description=_RUSSIAN_SPELLCHECK_BENCHMARK_DESCRIPTION + self.config.description,
            license=_LICENSE,
            citation=self.config.citation + "\n" + _RUSSIAN_SPELLCHECK_BENCHMARK_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        urls_to_download = self.config.data_urls
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        # GitHubTypoCorpusRu and MedSpellchecker ship a test split only.
        if self.config.name in ("GitHubTypoCorpusRu", "MedSpellchecker"):
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data_file": downloaded_files["test"],
                        "split": datasets.Split.TEST,
                    },
                )
            ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": downloaded_files["train"],
                    "split": datasets.Split.TRAIN,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": downloaded_files["test"],
                    "split": datasets.Split.TEST,
                },
            ),
        ]

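    # For illustration only (all values invented): one line of a data file, e.g.
    # data/RUSpellRU/test.json, is expected to look like
    #   {"source": "сонце светит", "correction": "солнце светит", "domain": "social_media"}
    # Only the field names are fixed by this script; the "domain" value is an assumption.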
    def _generate_examples(self, data_file, split):
        # Data files are in JSON Lines format: one JSON object per line.
        with open(data_file, encoding="utf-8") as f:
            for key, line in enumerate(f):
                row = json.loads(line)
                # Keep only the features declared by the active config.
                example = {feature: row[feature] for feature in self.config.features}
                yield key, example
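
For reference, a minimal loading sketch using the Hugging Face datasets library. The repository path below is a placeholder, not the actual Hub id, and recent versions of datasets may additionally require trust_remote_code=True for script-based datasets like this one:

    from datasets import load_dataset

    # "<hub-repo-or-local-path>" is a placeholder: substitute the Hub id of this
    # dataset repository, or a local directory containing spellcheck_benchmark.py.
    ruspellru = load_dataset("<hub-repo-or-local-path>", name="RUSpellRU")

    # Each example carries the three string features declared in _info().
    print(ruspellru["test"][0])  # {"source": ..., "correction": ..., "domain": ...}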