NikitaMartynov committed on
Commit
15a35eb
1 Parent(s): 888a2bb

minor fixes

Files changed (2)
  1. README.md +2 -10
  2. russian_multidomain_spellcheck.py +101 -0
README.md CHANGED
@@ -8,7 +8,7 @@ size_categories:
   - 1K<n<10K
 ---
 
-# Dataset Card for Dataset Name
+# Dataset Card for RuSpellGold
 
 ## Dataset Description
 
@@ -19,7 +19,7 @@ size_categories:
 
 ### Dataset Summary
 
-The Multidomain Russian Spellcheck dataset is a benchmark of 1711 sentence pairs dedicated to the problem of automatic spelling correction in Russian. The dataset is gathered from five domains: news, Russian classic literature, social media texts, the open web, and strategic documents. It has passed through a two-stage manual labeling process, with native speakers as annotators, to correct spelling violations while preserving the original style of the text.
+RuSpellGold is a benchmark of 1711 sentence pairs dedicated to the problem of automatic spelling correction in Russian. The dataset is gathered from five domains: news, Russian classic literature, social media texts, the open web, and strategic documents. It has passed through a two-stage manual labeling process, with native speakers as annotators, to correct spelling violations while preserving the original style of the text.
 
 ## Dataset Structure
 
@@ -40,14 +40,6 @@ Russian.
 }
 ```
 
-An example for illustration purposes (in English: "Saw posters around the city announcing her concert."; the source misspells "анонсирующие"):
-```
-{
-"sources": "Видела в городе афиши, анонсрующие ее концерт.",
-"corrections": "Видела в городе афиши, анонсирующие её концерт.",
-"domain": "aranea"
-}
-```
 
 ### Data Fields
russian_multidomain_spellcheck.py ADDED
@@ -0,0 +1,101 @@
+import json
+from typing import List
+
+import datasets
+
+
+_DESCRIPTION = """
+RuSpellGold is a benchmark of 1711 sentence pairs
+dedicated to the problem of automatic spelling correction in Russian.
+The dataset is gathered from five domains: news, Russian classic literature,
+social media texts, the open web, and strategic documents.
+It has passed through a two-stage manual labeling process with native speakers as annotators
+to correct spelling violations while preserving the original style of the text.
+"""
+
+_LICENSE = "apache-2.0"
+
+
+class RuSpellGoldConfig(datasets.BuilderConfig):
+    """BuilderConfig for RuSpellGold."""
+
+    def __init__(self, data_urls, features, **kwargs):
+        """BuilderConfig for RuSpellGold.
+
+        Args:
+          features: *list[string]*, list of the features that will appear in the
+            feature dict.
+          data_urls: *dict[string]*, urls to download the data files from.
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(RuSpellGoldConfig, self).__init__(version=datasets.Version("0.0.1"), **kwargs)
+        self.data_urls = data_urls
+        self.features = features
+
+
+class RuSpellGold(datasets.GeneratorBasedBuilder):
+    """RuSpellGold dataset."""
+
+    BUILDER_CONFIGS = [
+        RuSpellGoldConfig(
+            name="raw",
+            data_urls={
+                "train": "raw/train.json",
+                "validation": "raw/validation.json",
+                "test": "raw/test.json",
+            },
+            features=["sources", "corrections", "domain"],
+        ),
+    ]
+
+    def _info(self) -> datasets.DatasetInfo:
+        # The feature schema mirrors the fields documented in the dataset card.
+        features = {
+            "sources": datasets.Value("string"),
+            "corrections": datasets.Value("string"),
+            "domain": datasets.Value("string"),
+        }
+        return datasets.DatasetInfo(
+            features=datasets.Features(features),
+            description=_DESCRIPTION,
+            license=_LICENSE,
+        )
+
+    def _split_generators(
+        self, dl_manager: datasets.DownloadManager
+    ) -> List[datasets.SplitGenerator]:
+        urls_to_download = self.config.data_urls
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "data_file": downloaded_files["train"],
+                    "split": datasets.Split.TRAIN,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "data_file": downloaded_files["validation"],
+                    "split": datasets.Split.VALIDATION,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "data_file": downloaded_files["test"],
+                    "split": datasets.Split.TEST,
+                },
+            ),
+        ]
+
+    def _generate_examples(self, data_file, split):
+        # Each data file is read as JSON Lines: one JSON object per line.
+        with open(data_file, encoding="utf-8") as f:
+            for key, line in enumerate(f):
+                row = json.loads(line)
+                example = {feature: row[feature] for feature in self.config.features}
+                yield key, example
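
For context, a minimal sketch of how the new loading script could be exercised. The script filename, the "raw" config, and the three field names come from the diff above; the assumptions that the script sits next to the raw/*.json files in the working directory and that each file is JSON Lines (one object per line, as _generate_examples expects) are illustrative, not part of the commit:

```
# Minimal usage sketch (assumption: the script and raw/{train,validation,test}.json
# live in the current working directory; each file is JSON Lines).
from datasets import load_dataset

# "raw" is the config name defined in BUILDER_CONFIGS above.
dataset = load_dataset("russian_multidomain_spellcheck.py", name="raw")

# Each example carries the three fields documented in the dataset card.
sample = dataset["test"][0]
print(sample["sources"])      # noisy source sentence
print(sample["corrections"])  # manually corrected sentence
print(sample["domain"])       # one of the five source domains
```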