flexthink committed
Commit a3d066a
1 Parent: 77d22e9

Initial commit

.gitattributes CHANGED
@@ -35,3 +35,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp3 filter=lfs diff=lfs merge=lfs -text
  *.ogg filter=lfs diff=lfs merge=lfs -text
  *.wav filter=lfs diff=lfs merge=lfs -text
+ dataset/lexicon_test.json filter=lfs diff=lfs merge=lfs -text
+ dataset/lexicon_train.json filter=lfs diff=lfs merge=lfs -text
+ dataset/lexicon_valid.json filter=lfs diff=lfs merge=lfs -text
+ dataset/sentence_test.json filter=lfs diff=lfs merge=lfs -text
+ dataset/sentence_train.json filter=lfs diff=lfs merge=lfs -text
+ dataset/sentence_valid.json filter=lfs diff=lfs merge=lfs -text
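
These new attribute rules route the dataset JSON files through Git LFS, so the repository stores small pointer stubs (shown in the file diffs below) instead of the JSON payloads. A minimal sketch, not part of the commit, of which paths the rules cover; `fnmatch` is only an approximation of the real gitattributes matching semantics:

```python
# Rough sketch: which repository paths the updated .gitattributes rules
# would route through Git LFS. Patterns are copied from this diff.
from fnmatch import fnmatch

LFS_PATTERNS = [
    "*.mp3", "*.ogg", "*.wav",
    "dataset/lexicon_test.json", "dataset/lexicon_train.json",
    "dataset/lexicon_valid.json", "dataset/sentence_test.json",
    "dataset/sentence_train.json", "dataset/sentence_valid.json",
]

def is_lfs_tracked(path):
    """True if `path` matches any LFS-tracked pattern (approximate)."""
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

assert is_lfs_tracked("dataset/lexicon_train.json")
assert not is_lfs_tracked("README.md")
```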
README.md ADDED
@@ -0,0 +1,9 @@
+ # librig2p-nostress - Grapheme-To-Phoneme Dataset
+
+ This dataset contains samples that can be used to train a Grapheme-to-Phoneme system **without** stress information.
+
+ The dataset is derived from the following pre-existing datasets:
+
+ * [LibriSpeech ASR Corpus](https://www.openslr.org/12)
+ * [LibriSpeech Alignments](https://github.com/CorentinJ/librispeech-alignments)
+ * [Wikipedia Homograph Disambiguation Data](https://github.com/google/WikipediaHomographData)
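
A hedged usage sketch, not part of the commit: loading one split with the Hugging Face `datasets` library. The repository id below is an assumption taken from the URLs in the loading script added later in this commit; the split and field names come from that script as well.

```python
# Usage sketch. The repository id is an assumption; substitute the
# actual Hugging Face dataset id if it differs.
from datasets import load_dataset

lexicon = load_dataset(
    "flexthink/librig2p-nostress-space", split="lexicon_train"
)
sample = lexicon[0]
print(sample["char"])  # grapheme string
print(sample["phn"])   # list of phoneme strings
```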
dataset/lexicon_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0347606ab149f930db118d647fffd70dfd044fa9e25939b123f06c6aa8de51b4
+ size 361442
dataset/lexicon_train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d36f6af961d981788a9279c2739bac17cf6e00c01ca2cff35d0b90b0b87d1f6c
+ size 35212147
dataset/lexicon_valid.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b422797758d6c020fc3df607ca72adb41f5ac252024fe351e216db4c8387322
+ size 358580
dataset/sentence_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18fe6d8d3202c312aa1916b7c3b4d908e5ee2acb295b3b1309fd5ac2f4841125
+ size 3318940
dataset/sentence_train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:493914a5e9a4114b2e90736d2969aa4bc99bcd5898eec2070d9aa51a962712f3
+ size 217148647
dataset/sentence_valid.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c6600d164d75ddffd68f62f572aec98660b06b0fef35bfb6226afdd8331855f
+ size 3406399
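
Each of the six additions above is a Git LFS pointer stub rather than the JSON payload itself: the `oid` is the SHA-256 digest of the real file and `size` is its length in bytes. A minimal sketch, not part of the commit, of parsing a stub in the three-line format shown above and verifying a downloaded payload against it:

```python
# Sketch: parse a Git LFS pointer stub and verify a downloaded payload.
import hashlib

def parse_lfs_pointer(text):
    """Parse 'key value' lines such as 'oid sha256:...' and 'size 361442'."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def verify_payload(pointer_text, payload):
    """True if the payload matches the oid digest and size in the pointer."""
    oid, size = parse_lfs_pointer(pointer_text)
    return hashlib.sha256(payload).hexdigest() == oid and len(payload) == size
```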
librig2p-nostress-space-cmu.py ADDED
@@ -0,0 +1,84 @@
+ # coding=utf-8
+ # Copyright 2021 Artem Ploujnikov
+
+
+ # Lint as: python3
+ import json
+
+ import datasets
+
+ _DESCRIPTION = """\
+ Grapheme-to-Phoneme training, validation and test sets
+ """
+
+ _BASE_URL = "https://huggingface.co/datasets/flexthink/librig2p-nostress-space/resolve/main/dataset"
+ _HOMEPAGE_URL = "https://huggingface.co/datasets/flexthink/librig2p-nostress-space"
+ _NA = "N/A"
+ _SPLIT_TYPES = ["train", "valid", "test"]
+ _DATA_TYPES = ["lexicon", "sentence", "homograph"]
+ _SPLITS = [
+     f"{data_type}_{split_type}"
+     for data_type in _DATA_TYPES
+     for split_type in _SPLIT_TYPES
+ ]
+
+
+ class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
+     def __init__(self, base_url=None, splits=None, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.base_url = base_url or _BASE_URL
+         self.splits = splits or _SPLITS
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "sample_id": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "origin": datasets.Value("string"),
+                     "char": datasets.Value("string"),
+                     "phn": datasets.Sequence(datasets.Value("string")),
+                     "homograph": datasets.Value("string"),
+                     "homograph_wordid": datasets.Value("string"),
+                     "homograph_char_start": datasets.Value("int32"),
+                     "homograph_char_end": datasets.Value("int32"),
+                     "homograph_phn_start": datasets.Value("int32"),
+                     "homograph_phn_end": datasets.Value("int32"),
+                 },
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE_URL,
+         )
+
+     def _get_url(self, split):
+         return f"{self.base_url}/{split}.json"
+
+     def _split_generator(self, dl_manager, split):
+         url = self._get_url(split)
+         path = dl_manager.download_and_extract(url)
+         return datasets.SplitGenerator(
+             name=split, gen_kwargs={"datapath": path, "datatype": split},
+         )
+
+     def _split_generators(self, dl_manager):
+         return [self._split_generator(dl_manager, split) for split in self.splits]
+
+     def _generate_examples(self, datapath, datatype):
+         with open(datapath, encoding="utf-8") as f:
+             data = json.load(f)
+             for sentence_counter, (sample_id, item) in enumerate(data.items()):
+                 resp = {
+                     "sample_id": sample_id,
+                     "speaker_id": str(item.get("speaker_id") or _NA),
+                     "origin": item["origin"],
+                     "char": item["char"],
+                     "phn": item["phn"],
+                     "homograph": item.get("homograph", _NA),
+                     "homograph_wordid": item.get("homograph_wordid", _NA),
+                     "homograph_char_start": item.get("homograph_char_start", 0),
+                     "homograph_char_end": item.get("homograph_char_end", 0),
+                     "homograph_phn_start": item.get("homograph_phn_start", 0),
+                     "homograph_phn_end": item.get("homograph_phn_end", 0),
+                 }
+                 yield sentence_counter, resp
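
A hedged usage sketch, not part of the commit, exercising the builder script above locally. Split names follow the `<data_type>_<split_type>` pattern built in `_SPLITS`:

```python
# Usage sketch: run the loading script directly with the datasets
# library. Requesting an explicit split avoids the "homograph" splits,
# whose JSON files are not added by this commit.
from datasets import load_dataset

data = load_dataset("librig2p-nostress-space-cmu.py", split="lexicon_valid")
sample = data[0]
print(sample["sample_id"], sample["char"], sample["phn"])
```

Note that `_DATA_TYPES` also includes `homograph`, although no `homograph_*.json` files are added in this commit; those splits presumably resolve only once the corresponding files exist under `_BASE_URL`.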