# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The Russian Spellcheck Benchmark"""
import os
import json
import pandas as pd
from typing import List, Dict, Optional
import datasets
_RUSSIAN_SPELLCHECK_BENCHMARK_DESCRIPTION = """
Russian Spellcheck Benchmark is a new benchmark for spelling correction in the Russian language.
It includes four datasets, each consisting of pairs of Russian sentences.
Each pair comprises a sentence that may contain spelling errors and its corresponding correction.
The datasets were gathered from various sources and domains, including social networks, internet blogs, GitHub commits,
medical anamnesis, literature, news, reviews and more.
"""
_MULTIDOMAIN_GOLD_DESCRIPTION = """
MultidomainGold is a dataset of 3500 sentence pairs
dedicated to the problem of automatic spelling correction in the Russian language.
The dataset is gathered from seven different domains: news, Russian classic literature,
social media texts, the open web, strategic documents, subtitles and reviews.
It has been passed through a two-stage manual labeling process with native speakers as annotators
to correct spelling violations while preserving the original style of the text.
"""
_GITHUB_TYPO_CORPUS_RU_DESCRIPTION = """
GitHubTypoCorpusRu is a manually labeled part of the GitHub Typo Corpus https://arxiv.org/abs/1911.12893.
Sentences tagged "ru" were extracted from the GitHub Typo Corpus
and passed through manual labeling to verify that the corresponding corrections are right.
"""
_RUSPELLRU_DESCRIPTION = """
RUSpellRU is the first benchmark on the task of automatic spelling correction for the Russian language,
introduced in https://www.dialog-21.ru/media/3427/sorokinaaetal.pdf.
The original sentences are drawn from the social media domain and labeled by
human annotators.
"""
_MEDSPELLCHECK_DESCRIPTION = """
The dataset is taken from the GitHub repository of the eponymous project https://github.com/DmitryPogrebnoy/MedSpellChecker.
The original sentences are taken from anonymized medical anamnesis records and passed through a
two-stage manual labeling pipeline.
"""
_RUSSIAN_SPELLCHECK_BENCHMARK_CITATION = """ # TODO: add citation"""
_MULTIDOMAIN_GOLD_CITATION = """ # TODO: add citation from Dialog"""
_GITHUB_TYPO_CORPUS_RU_CITATION = """
@article{DBLP:journals/corr/abs-1911-12893,
author = {Masato Hagiwara and
Masato Mita},
title = {GitHub Typo Corpus: {A} Large-Scale Multilingual Dataset of Misspellings
and Grammatical Errors},
journal = {CoRR},
volume = {abs/1911.12893},
year = {2019},
url = {http://arxiv.org/abs/1911.12893},
eprinttype = {arXiv},
eprint = {1911.12893},
timestamp = {Wed, 08 Jan 2020 15:28:22 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1911-12893.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_RUSPELLRU_CITATION = """
@inproceedings{Shavrina2016SpellRuevalT,
  title={SpellRuEval: The First Competition on Automatic Spelling Correction for Russian},
  author={Tatiana Shavrina},
  year={2016}
}
"""
_LICENSE = "apache-2.0"
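# Each data file referenced in a config's `data_urls` is expected to be in
# JSON Lines format: one object per line carrying the fields consumed by
# `_generate_examples`. An illustrative, made-up record (values are
# hypothetical, not taken from the actual data):
#
#   {"source": "превет мир", "correction": "привет мир", "domain": "social_media"}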
class RussianSpellcheckBenchmarkConfig(datasets.BuilderConfig):
"""BuilderConfig for RussianSpellcheckBenchmark."""
    def __init__(
        self,
        data_urls: Dict[str, str],
        features: List[str],
        citation: str,
        **kwargs,
    ):
        """BuilderConfig for RussianSpellcheckBenchmark.
        Args:
            data_urls: *dict[string]*, urls to download the data files from, keyed by split name.
            features: *list[string]*, list of the features that will appear in the
                feature dict. Should not include "label".
            citation: *string*, citation for the dataset.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("0.0.1"), **kwargs)
self.data_urls = data_urls
self.features = features
self.citation = citation
class RussianSpellcheckBenchmark(datasets.GeneratorBasedBuilder):
"""Russian Spellcheck Benchmark."""
BUILDER_CONFIGS = [
RussianSpellcheckBenchmarkConfig(
name="GitHubTypoCorpusRu",
description=_GITHUB_TYPO_CORPUS_RU_DESCRIPTION,
data_urls={
"test": "data/GitHubTypoCorpusRu/test.json",
},
features=["source", "correction", "domain"],
citation=_GITHUB_TYPO_CORPUS_RU_CITATION,
),
RussianSpellcheckBenchmarkConfig(
name="MedSpellchecker",
description=_MEDSPELLCHECK_DESCRIPTION,
data_urls={
"test": "data/MedSpellchecker/test.json",
},
features=["source", "correction", "domain"],
citation="",
),
RussianSpellcheckBenchmarkConfig(
name="MultidomainGold",
description=_MULTIDOMAIN_GOLD_DESCRIPTION,
data_urls={
"train": "data/MultidomainGold/train.json",
"test": "data/MultidomainGold/test.json",
},
features=["source", "correction", "domain"],
citation=_MULTIDOMAIN_GOLD_CITATION,
),
RussianSpellcheckBenchmarkConfig(
name="RUSpellRU",
description=_RUSPELLRU_DESCRIPTION,
data_urls={
"test": "data/RUSpellRU/test.json",
"train": "data/RUSpellRU/train.json",
},
features=["source", "correction", "domain"],
citation=_RUSPELLRU_CITATION,
),
]
def _info(self) -> datasets.DatasetInfo:
features = {
"source": datasets.Value("string"),
"correction": datasets.Value("string"),
"domain": datasets.Value("string"),
}
return datasets.DatasetInfo(
features=datasets.Features(features),
description=_RUSSIAN_SPELLCHECK_BENCHMARK_DESCRIPTION + self.config.description,
license=_LICENSE,
citation=self.config.citation + "\n" + _RUSSIAN_SPELLCHECK_BENCHMARK_CITATION,
)
def _split_generators(
self, dl_manager: datasets.DownloadManager
) -> List[datasets.SplitGenerator]:
urls_to_download = self.config.data_urls
downloaded_files = dl_manager.download_and_extract(urls_to_download)
        # GitHubTypoCorpusRu and MedSpellchecker ship only a test split.
        if self.config.name in ("GitHubTypoCorpusRu", "MedSpellchecker"):
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": downloaded_files["test"],
"split": datasets.Split.TEST,
},
)
]
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_file": downloaded_files["train"],
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": downloaded_files["test"],
"split": datasets.Split.TEST,
},
)
]
    def _generate_examples(self, data_file, split):
        # Each data file is in JSON Lines format: one example object per line.
        with open(data_file, encoding="utf-8") as f:
            for key, line in enumerate(f):
                row = json.loads(line)
                # Keep only the features declared for this config.
                example = {feature: row[feature] for feature in self.config.features}
                yield key, example
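# A minimal usage sketch (illustrative, not part of the loading script itself).
# Passing this file's path to `load_dataset` is an assumption; substitute the
# published Hub repo id if the benchmark is hosted there. Recent `datasets`
# versions may also require `trust_remote_code=True` for script-based datasets.
if __name__ == "__main__":
    benchmark = datasets.load_dataset(__file__, "RUSpellRU")
    # Each example pairs a noisy source sentence with its correction.
    print(benchmark["test"][0])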