Datasets:
Tasks:
Text Generation
Languages:
Russian
Multilinguality:
monolingual
Size Categories:
10K<n<20K
Language Creators:
crowdsourced
Annotations Creators:
crowdsourced
ArXiv:
License:
# coding=utf-8 | |
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors. | |
# | |
# Licensed under the Apache License, Version 2.0 (the "License"); | |
# you may not use this file except in compliance with the License. | |
# You may obtain a copy of the License at | |
# | |
# http://www.apache.org/licenses/LICENSE-2.0 | |
# | |
# Unless required by applicable law or agreed to in writing, software | |
# distributed under the License is distributed on an "AS IS" BASIS, | |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
# See the License for the specific language governing permissions and | |
# limitations under the License. | |
# Lint as: python3 | |
"""The Russian Spellcheck Benchmark""" | |
import os | |
import json | |
import pandas as pd | |
from typing import List, Dict, Optional | |
import datasets | |
# Benchmark-wide description; each config's own description is appended to it
# in `_info`, so keep this text generic across the four datasets.
_RUSSIAN_SPELLCHECK_BENCHMARK_DESCRIPTION = """
Russian Spellcheck Benchmark is a new benchmark for spelling correction in Russian language.
It includes four datasets, each of which consists of pairs of sentences in Russian language.
Each pair embodies sentence, which may contain spelling errors, and its corresponding correction.
Datasets were gathered from various sources and domains including social networks, internet blogs, github commits,
medical anamnesis, literature, news, reviews and more.
"""
# Per-dataset description for the "MultidomainGold" config.
_MULTIDOMAIN_GOLD_DESCRIPTION = """
MultidomainGold is a dataset of 3500 sentence pairs
dedicated to a problem of automatic spelling correction in Russian language.
The dataset is gathered from seven different domains including news, Russian classic literature,
social media texts, open web, strategic documents, subtitles and reviews.
It has been passed through two-stage manual labeling process with native speakers as annotators
to correct spelling violation and preserve original style of text at the same time.
"""
# Per-dataset description for the "GitHubTypoCorpusRu" config.
_GITHUB_TYPO_CORPUS_RU_DESCRIPTION = """
GitHubTypoCorpusRu is a manually labeled part of GitHub Typo Corpus https://arxiv.org/abs/1911.12893.
The sentences with "ru" tag attached to them have been extracted from GitHub Typo Corpus
and pass them through manual labeling to ensure the corresponding corrections are right.
"""
# Per-dataset description for the "RUSpellRU" config.
_RUSPELLRU_DESCRIPTION = """
RUSpellRU is a first benchmark on the task of automatic spelling correction for Russian language
introduced in https://www.dialog-21.ru/media/3427/sorokinaaetal.pdf.
Original sentences are drawn from social media domain and labeled by
human annotators.
"""
# Per-dataset description for the "MedSpellchecker" config.
# NOTE(review): "eponymos" below is likely a typo for "eponymous"; left as-is
# because the description text is part of the published dataset metadata.
_MEDSPELLCHECK_DESCRIPTION = """
The dataset is taken from GitHub repo associated with eponymos project https://github.com/DmitryPogrebnoy/MedSpellChecker.
Original sentences are taken from anonymized medical anamnesis and passed through
two-stage manual labeling pipeline.
"""
# Citations. The benchmark-wide citation is appended to every config's own
# citation in `_info`; two of them are still placeholders.
_RUSSIAN_SPELLCHECK_BENCHMARK_CITATION = """ # TODO: add citation"""
_MULTIDOMAIN_GOLD_CITATION = """ # TODO: add citation from Dialog"""
_GITHUB_TYPO_CORPUS_RU_CITATION = """
@article{DBLP:journals/corr/abs-1911-12893,
  author    = {Masato Hagiwara and
               Masato Mita},
  title     = {GitHub Typo Corpus: {A} Large-Scale Multilingual Dataset of Misspellings
               and Grammatical Errors},
  journal   = {CoRR},
  volume    = {abs/1911.12893},
  year      = {2019},
  url       = {http://arxiv.org/abs/1911.12893},
  eprinttype = {arXiv},
  eprint    = {1911.12893},
  timestamp = {Wed, 08 Jan 2020 15:28:22 +0100},
  biburl    = {https://dblp.org/rec/journals/corr/abs-1911-12893.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_RUSPELLRU_CITATION = """
@inproceedings{Shavrina2016SpellRuevalT,
  title={SpellRueval : the FiRSt Competition on automatiC Spelling CoRReCtion FoR RuSSian},
  author={Tatiana Shavrina and Россия Москва and Москва Яндекс and Россия and Россия Долгопрудный},
  year={2016}
}
"""
# SPDX license identifier attached to the DatasetInfo of every config.
_LICENSE = "apache-2.0"
class RussianSpellcheckBenchmarkConfig(datasets.BuilderConfig):
    """BuilderConfig for RussianSpellcheckBenchmark.

    Carries the per-dataset pieces that vary between the four benchmark
    configs: where the data files live, which JSON fields each example
    exposes, and the citation attached in ``_info``.
    """

    def __init__(
        self,
        data_urls: Dict[str, str],
        features: List[str],
        citation: str,
        **kwargs,
    ):
        """BuilderConfig for RussianSpellcheckBenchmark.

        Args:
            data_urls: *dict[string, string]*, mapping from split name
                ("train" / "test") to the path of that split's JSON-lines
                data file.
            features: *list[string]*, keys read from each JSON row and
                exposed as example features.
            citation: citation string specific to this dataset; the
                benchmark-wide citation is appended separately in ``_info``.
            **kwargs: keyword arguments forwarded to super.
        """
        # All configs share a single benchmark version.
        super().__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.data_urls = data_urls
        self.features = features
        self.citation = citation
class RussianSpellcheckBenchmark(datasets.GeneratorBasedBuilder):
    """Russian Spellcheck Benchmark: four spelling-correction datasets."""

    # Test-only configs are listed first; MultidomainGold and RUSpellRU also
    # ship a train split (see `_split_generators`).
    BUILDER_CONFIGS = [
        RussianSpellcheckBenchmarkConfig(
            name="GitHubTypoCorpusRu",
            description=_GITHUB_TYPO_CORPUS_RU_DESCRIPTION,
            data_urls={
                "test": "data/GitHubTypoCorpusRu/test.json",
            },
            features=["source", "correction", "domain"],
            citation=_GITHUB_TYPO_CORPUS_RU_CITATION,
        ),
        RussianSpellcheckBenchmarkConfig(
            name="MedSpellchecker",
            description=_MEDSPELLCHECK_DESCRIPTION,
            data_urls={
                "test": "data/MedSpellchecker/test.json",
            },
            features=["source", "correction", "domain"],
            # No published citation for this dataset yet.
            citation="",
        ),
        RussianSpellcheckBenchmarkConfig(
            name="MultidomainGold",
            description=_MULTIDOMAIN_GOLD_DESCRIPTION,
            data_urls={
                "train": "data/MultidomainGold/train.json",
                "test": "data/MultidomainGold/test.json",
            },
            features=["source", "correction", "domain"],
            citation=_MULTIDOMAIN_GOLD_CITATION,
        ),
        RussianSpellcheckBenchmarkConfig(
            name="RUSpellRU",
            description=_RUSPELLRU_DESCRIPTION,
            data_urls={
                "test": "data/RUSpellRU/test.json",
                "train": "data/RUSpellRU/train.json",
            },
            features=["source", "correction", "domain"],
            citation=_RUSPELLRU_CITATION,
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Build DatasetInfo: all configs expose the same three string features."""
        features = {
            "source": datasets.Value("string"),
            "correction": datasets.Value("string"),
            "domain": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            features=datasets.Features(features),
            # Benchmark-wide text first, then the config-specific part.
            description=_RUSSIAN_SPELLCHECK_BENCHMARK_DESCRIPTION + self.config.description,
            license=_LICENSE,
            citation=self.config.citation + "\n" + _RUSSIAN_SPELLCHECK_BENCHMARK_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download the data files and declare the splits for this config.

        GitHubTypoCorpusRu and MedSpellchecker provide only a test split;
        the other configs provide train and test.
        """
        urls_to_download = self.config.data_urls
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        # Membership test instead of a chained `or` comparison.
        if self.config.name in ("GitHubTypoCorpusRu", "MedSpellchecker"):
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data_file": downloaded_files["test"],
                        "split": datasets.Split.TEST,
                    },
                )
            ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": downloaded_files["train"],
                    "split": datasets.Split.TRAIN,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": downloaded_files["test"],
                    "split": datasets.Split.TEST,
                },
            ),
        ]

    def _generate_examples(self, data_file, split):
        """Yield (key, example) pairs from a JSON-lines data file.

        Args:
            data_file: path to a file with one JSON object per line.
            split: split name (unused; kept for the gen_kwargs interface).
        """
        with open(data_file, encoding="utf-8") as f:
            # enumerate replaces the original hand-rolled `key` counter.
            for key, line in enumerate(f):
                row = json.loads(line)
                example = {feature: row[feature] for feature in self.config.features}
                yield key, example