Datasets:
Add files
Browse files- .gitattributes +3 -0
- README.md +30 -1
- data/dataset/dataset.csv +3 -0
- scripts/generate_dataset.py +190 -0
- scripts/get_data.py +118 -0
.gitattributes
CHANGED
@@ -53,3 +53,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
53 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
54 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
55 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
53 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
54 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
55 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
56 |
+
txt filter=lfs diff=lfs merge=lfs -text
|
57 |
+
*.txt filter=lfs diff=lfs merge=lfs -text
|
58 |
+
*.csv filter=lfs diff=lfs merge=lfs -text
|
README.md
CHANGED
@@ -1,3 +1,32 @@
|
|
1 |
---
|
2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
+
language: fr
|
3 |
+
tags:
|
4 |
+
- grammar
|
5 |
+
- spelling correction
|
6 |
+
license: MIT
|
7 |
+
datasets:
|
8 |
+
- synthetic
|
9 |
---
|
10 |
+
|
11 |
+
# Spelling correction dataset (French)
|
12 |
+
|
13 |
+
This dataset is generated by transforming/corrupting sentences of a French news corpus
|
14 |
+
provided by the [University of Leipzig](https://wortschatz.uni-leipzig.de/en/download/French).
|
15 |
+
|
16 |
+
The following transformations are applied to words in the sentences:
|
17 |
+
- concatenation of pairs of words
|
18 |
+
- swapping of neighboring letters in words
|
19 |
+
- insertion
|
20 |
+
- deletion
|
21 |
+
- replacement (by neighboring characters in AZERTY keyboard)
|
22 |
+
|
23 |
+
|
24 |
+
## Generation
|
25 |
+
|
26 |
+
`pip install happytransformer`
|
27 |
+
|
28 |
+
```bash
|
29 |
+
./scripts/get_data.py -t news -y 2023 -s 10K
|
30 |
+
./scripts/generate_dataset.py -i data/fra_news_2023_10k/fra_news_2023_10k-sentences.txt
|
31 |
+
```
|
32 |
+
|
data/dataset/dataset.csv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:568adf975baad0f56b2a7e366ddfbb66e012cb2fab59de67d326019e2eae2091
|
3 |
+
size 13231623
|
scripts/generate_dataset.py
ADDED
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
|
3 |
+
import sys
|
4 |
+
import os
|
5 |
+
import argparse
|
6 |
+
import random
|
7 |
+
import csv
|
8 |
+
import re
|
9 |
+
import logging
|
10 |
+
from tqdm import tqdm
|
11 |
+
|
12 |
+
logger = logging.getLogger()
|
13 |
+
logger.setLevel(logging.INFO)
|
14 |
+
|
15 |
+
|
16 |
+
# Map of each (lowercase) character to the characters physically adjacent to it
# on an AZERTY keyboard (including shifted symbols and digits). Used by
# replace() to simulate realistic "fat-finger" typos in French text.
REPLACEMENT_MAP = {
    'a': ['&', 'é', 'ã', '1', '2', 'z', 'q'],
    'ã': ['a', 'é', '&', '"', 'a', 'z', '1', '2', '3'],
    'à': ['a', 'è', '-', '_', '\\', 'y', 'u', '6', '7', '8'],
    'ä': ['a', 'â', 'p', 'm', '$', '£', ')', ']', '=', '}', 'º', '+', '%', 'ù'],
    'â': ['a', 'ä', 'p', 'm', '$', '£', ')', ']', '=', '}', 'º', '+', '%', 'ù'],
    'b': ['v', 'n', 'g', 'h'],
    'c': ['x', 'v', 'f', 'g'],
    'ç': ['c', '_', '\\', 'à', '@', 'i', 'o', '8', '9', '0'],
    'd': ['s', 'f', 'e', '€', 'r', 'x', 'c'],
    'e': ['€', 'z', 'r', 's', 'd', '"', '#', '\'', '{', '3', '4'],
    'é': ['e', '€', 'ã', '&', '"', 'a', 'z', '1', '2', '3'],
    'ê': ['e', '€', 'z', 'r', 's', 'd', '"', '#', '\'', '{'],
    'è': ['e', '€', 'à', '-', '_', '\\', 'y', 'u', '6', '7', '8'],
    'f': ['d', 'g', 'r', 't', 'c', 'v'],
    'g': ['f', 'h', 't', 'y', 'v', 'b'],
    'h': ['g', 'j', 'y', 'u', 'b', 'n'],
    'i': ['u', 'o', '_', '\\', 'ç', '^', 'j', 'k', '8', '9'],
    'j': ['h', 'k', 'u', 'i', 'n', '?', ','],
    'k': ['j', 'l', 'i', 'o', '?', ',', '.', ';'],
    'l': ['k', 'm', 'o', 'p', '.', ';', '/', ':'],
    'm': ['l', '%', 'ù', 'p', 'ä', 'â', '/', ':', '§', '!'],
    'n': ['b', '?', ',', 'h', 'j'],
    'o': ['i', 'p', 'ç', '^', 'à', '@', 'k', 'l', '9', '0'],
    'p': ['o', 'ä', 'â', 'à', '@', ')', ']', 'l', 'm', '0', 'º'],
    'q': ['a', 'z', 's', 'w', '>', '<'],
    'r': ['e', 't', 'd', 'f', '\'', '{', '(', '[', '4', '5'],
    's': ['q', 'd', 'z', 'w', 'x'],
    't': ['r', 'y', 'f', 'g', '(', '[', '-', '5', '6'],
    'u': ['y', 'i', 'h', 'j', 'è', 'à', '_', '\\', '7', '8'],
    'ù': ['u', '%', 'm', 'ä', 'â', '§', '!', '$', '£'],
    'v': ['c', 'b', 'f', 'g'],
    'x': ['w', 'c', 's', 'd'],
    'y': ['t', 'u', 'g', 'h', '_', 'è', 'à', '6', '7'],
    'w': ['>', '<', 'x', 'q', 's'],
    'z': ['a', 'e', '€', 'q', 's', 'é', 'ã', '"', '#', '2', '3'],
}


# Leipzig corpus sentence files store lines as "<numeric id>\t<sentence>";
# this matches the leading id so it can be stripped before processing.
SENTENCE_ID = re.compile(r'^\d+\t')
|
56 |
+
|
57 |
+
|
58 |
+
def rotate(word: str) -> str:
    """Swap one interior character of *word* with a random neighbour.

    Words shorter than three characters are returned unchanged, since they
    have no interior character to swap.
    """
    if len(word) < 3:
        return word
    pos = random.randint(1, len(word) - 2)
    neighbour = pos + 1 if random.random() > 0.5 else pos - 1
    chars = list(word)
    chars[pos], chars[neighbour] = chars[neighbour], chars[pos]
    return ''.join(chars)
|
67 |
+
|
68 |
+
|
69 |
+
def replace(word: str) -> str:
    """Replace one random character of *word* with an AZERTY neighbour.

    The replacement preserves the case of the original character. If the
    chosen character has no entry in REPLACEMENT_MAP (digits, punctuation,
    unmapped letters), the word is returned unchanged.
    """
    pos = random.randint(0, len(word) - 1)
    lowered = word[pos].lower()
    if lowered not in REPLACEMENT_MAP:
        return word
    substitute = random.choice(REPLACEMENT_MAP[lowered])
    if word[pos] != lowered:
        substitute = substitute.upper()
    return word[:pos] + substitute + word[pos + 1:]
|
80 |
+
|
81 |
+
|
82 |
+
def insert(word: str) -> str:
    """Duplicate one random character of *word* next to itself.

    The copy is inserted immediately before or after the chosen position,
    simulating a doubled keystroke. Empty strings are returned unchanged
    (random.randint(0, -1) would raise on them).
    """
    if not word:
        return word
    i = random.randint(0, len(word) - 1)
    # Insert at index i (copy lands just before the char) or i + 1 (just
    # after). The previous version used i - 1, which for i == 0 meant
    # list.insert(-1, c): the copy landed just before the *last* character
    # instead of adjacent to position 0.
    j = i + 1 if random.random() > 0.5 else i
    letters = list(word)
    letters.insert(j, word[i])
    return ''.join(letters)
|
90 |
+
|
91 |
+
|
92 |
+
def delete(word: str) -> str:
    """Drop one random character from *word*.

    Empty strings are returned unchanged; the previous version raised
    ValueError on them because random.randint(0, -1) has an empty range.
    """
    if not word:
        return word
    i = random.randint(0, len(word) - 1)
    return word[:i] + word[i + 1:]
|
96 |
+
|
97 |
+
|
98 |
+
# Word-level corruption operators; transform_sentence() picks one uniformly
# at random per word. Each takes a single word and returns the corrupted word.
OPERATIONS = [
    rotate,
    insert,
    delete,
    replace
]
|
104 |
+
|
105 |
+
|
106 |
+
def transform_sentence(sentence: str, probability: float = 0.25) -> str:
    """Return a corrupted copy of *sentence*.

    Two corruption passes are applied:
    1. With probability 0.5, one random pair of neighbouring words is
       concatenated into a single token.
    2. Each word is then independently corrupted (with the given
       *probability*) by one operation chosen at random from OPERATIONS.
       Purely numeric tokens are never corrupted.
    """
    words = sentence.split()

    # Join two words with probability 0.5
    if random.random() < 0.5 and len(words) > 1:
        i = random.randint(0, len(words) - 1)
        # Merge with the right neighbour on a coin flip when one exists, or
        # forcibly when i is the first word (no left neighbour); otherwise
        # merge with the left neighbour. Precedence: (coin and i < last) or i == 0.
        if random.random() < 0.5 and i < len(words) - 1 or i == 0:
            j = i + 1
            words[i] = f"{words[i]}{words[j]}"
        else:
            j = i - 1
            words[i] = f"{words[j]}{words[i]}"
        # Drop the absorbed neighbour.
        del words[j]

    # Transform each word with probability 'probability'
    for i, word in enumerate(words):
        if random.random() < probability and not word.isdigit():
            words[i] = random.choice(OPERATIONS)(word)
    sentence_ = ' '.join(words)
    return sentence_
|
126 |
+
|
127 |
+
|
128 |
+
def main(args: argparse.Namespace) -> int:
    """Build the spelling-correction CSV dataset.

    Reads a Leipzig-format sentence file (``<id>\\t<sentence>`` lines),
    cleans each sentence, generates ``args.number_of_cases`` corrupted
    variants per sentence and writes (corrupted, original) rows to
    ``args.output_data_path``.

    Returns 0 on success, -1 when the input path does not exist.
    """
    if not os.path.exists(args.input_data_path):
        logger.error("Invalid input data path.")
        return -1

    sentences = []
    with open(args.input_data_path, 'r') as f:
        for sentence in f:
            # Only keep lines that carry a Leipzig sentence id; strip the id
            # and remove all quotation-mark variants before normalizing
            # whitespace.
            if SENTENCE_ID.search(sentence):
                sentence = SENTENCE_ID.sub('', sentence)
                sentence = (
                    sentence
                    .replace('“', '')
                    .replace('”', '')
                    .replace('"', '')
                    .replace('«', '')
                    .replace('»', '')
                )
                sentences.append(' '.join(sentence.split()))

    dirname = os.path.dirname(args.output_data_path)
    if dirname:
        # makedirs creates intermediate directories too; plain os.mkdir
        # failed when any parent was missing, and os.mkdir('') raised when
        # the output path had no directory component at all.
        os.makedirs(dirname, exist_ok=True)

    logger.info("Transforming sentences to generate cases")
    with open(args.output_data_path, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerow(["input", "target"])
        for sentence in tqdm(sentences):
            correction = sentence
            for _ in range(args.number_of_cases):
                transformed_sentence = transform_sentence(sentence)
                # Skip corruptions that left the sentence untouched: a row
                # whose input equals its target teaches nothing.
                if transformed_sentence == sentence:
                    continue
                input_text = f"grammaire: {transformed_sentence}"
                writer.writerow([input_text, correction])

    return 0
|
166 |
+
|
167 |
+
|
168 |
+
if __name__ == "__main__":
    # CLI: -i input Leipzig sentence file (required), -o output CSV path,
    # -n number of corrupted variants generated per sentence.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input-data-path', '-i',
        type=str,
        required=True
    )
    parser.add_argument(
        '--output-data-path', '-o',
        type=str,
        default='data/dataset/dataset.csv'
    )
    parser.add_argument(
        '--number-of-cases', '-n',
        type=int,
        default=5
    )

    # parse_known_args: unrecognized flags are silently ignored rather than
    # aborting the run.
    args, _ = parser.parse_known_args()

    sys.exit(
        main(args)
    )
|
scripts/get_data.py
ADDED
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
|
3 |
+
import os
|
4 |
+
import sys
|
5 |
+
import argparse
|
6 |
+
import requests
|
7 |
+
from enum import Enum
|
8 |
+
import urllib.parse as urlparse
|
9 |
+
import logging
|
10 |
+
import tarfile
|
11 |
+
|
12 |
+
|
13 |
+
logger = logging.getLogger()
|
14 |
+
logger.setLevel(logging.INFO)
|
15 |
+
|
16 |
+
|
17 |
+
class EnumString(Enum):
    """Enum base whose str() is the member's raw value.

    Used so corpus type/size members interpolate cleanly into file-name
    templates and display as their values in argparse choices.
    """

    def __str__(self: Enum) -> str:
        return self.value
|
20 |
+
|
21 |
+
|
22 |
+
class CorpusType(EnumString):
    """Corpus flavours accepted by the download service."""
    NEWS = "news"
    WIKI = "wikipedia"


class CorpusSize(EnumString):
    """Available corpus archive sizes (number of sentences)."""
    SMALLEST = "10K"
    SMALL = "30K"
    MEDIUM = "100K"
    LARGE = "300K"
    LARGEST = "1M"


# Wortschatz Leipzig download endpoint and French-corpus archive name scheme.
BASE_URL = "https://downloads.wortschatz-leipzig.de"
URL_PREFIX = "corpora"
FILE_TEMPLATE = "fra_{type}_{year}_{size}.tar.gz"
|
38 |
+
|
39 |
+
|
40 |
+
def main(args: argparse.Namespace) -> int:
    """Download a French Leipzig corpus archive and extract it in args.dst_dir.

    Returns 0 on success, -1 on a download/destination error, -2 on an
    extraction error.
    """
    if not os.path.exists(args.dst_dir):
        # Fatal condition: log at error level (was logger.info).
        logger.error(f"Invalid destination directory: '{args.dst_dir}'.")
        return -1

    filename = FILE_TEMPLATE.format(
        type=args.type.value,
        year=args.year,
        size=args.size.value,
    )
    # BUG FIX: the archive name was never interpolated into the URL, so every
    # request hit a non-existent path. The URL must be
    # <BASE_URL>/corpora/<filename>.
    url = urlparse.urljoin(
        BASE_URL,
        f"{URL_PREFIX}/{filename}"
    )

    try:
        file_path = os.path.join(args.dst_dir, filename)
        logger.info("Downloading %s" % file_path)
        with open(file_path, 'wb') as f:
            response = requests.get(url, stream=True)
            # Fail fast on HTTP errors instead of saving an error page as the
            # archive.
            response.raise_for_status()
            total_length = response.headers.get('content-length')

            if total_length is None:  # no content length header
                f.write(response.content)
            else:
                dl = 0
                total_length = int(total_length)
                for data in response.iter_content(chunk_size=4096):
                    dl += len(data)
                    f.write(data)
                    # 50-column text progress bar plus percentage.
                    done = int(50 * dl / total_length)
                    done_bar = '=' * done
                    remainder = ' ' * (50 - done)
                    done_pct = f"{100.0 * dl / total_length:6.2f}"
                    sys.stdout.write("\r[%s%s] [%s]" % (done_bar, remainder, done_pct))
                    sys.stdout.flush()
                # Terminate the progress-bar line so later log output starts
                # on a fresh line.
                sys.stdout.write("\n")
    except Exception as error:
        logger.error(error)
        return -1

    # Uncompress file
    try:
        logger.info("Extracting %s" % file_path)
        with tarfile.open(file_path) as f:
            f.extractall(os.path.dirname(file_path))
    except Exception as error:
        logger.error(error)
        return -2

    return 0
|
90 |
+
|
91 |
+
|
92 |
+
if __name__ == "__main__":
    # CLI: -t corpus type, -y corpus year, -s corpus size, -d destination dir.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--type", '-t',
        type=CorpusType,
        choices=list(CorpusType)
    )
    parser.add_argument(
        "--year", '-y',
        type=str,
        default='2023'
    )
    parser.add_argument(
        "--size", '-s',
        type=CorpusSize,
        choices=list(CorpusSize)
    )
    parser.add_argument(
        "--dst-dir", '-d',
        type=str,
        default='data'
    )
    # parse_known_args: unrecognized flags are silently ignored rather than
    # aborting the run.
    args, _ = parser.parse_known_args()

    sys.exit(
        main(args)
    )
|