parquet-converter committed on
Commit
eb4f219
1 Parent(s): 5169b6b

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,46 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.model filter=lfs diff=lfs merge=lfs -text
11
- *.msgpack filter=lfs diff=lfs merge=lfs -text
12
- *.onnx filter=lfs diff=lfs merge=lfs -text
13
- *.ot filter=lfs diff=lfs merge=lfs -text
14
- *.parquet filter=lfs diff=lfs merge=lfs -text
15
- *.pb filter=lfs diff=lfs merge=lfs -text
16
- *.pt filter=lfs diff=lfs merge=lfs -text
17
- *.pth filter=lfs diff=lfs merge=lfs -text
18
- *.rar filter=lfs diff=lfs merge=lfs -text
19
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
20
- *.tar.* filter=lfs diff=lfs merge=lfs -text
21
- *.tflite filter=lfs diff=lfs merge=lfs -text
22
- *.tgz filter=lfs diff=lfs merge=lfs -text
23
- *.wasm filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
28
- # Audio files - uncompressed
29
- *.pcm filter=lfs diff=lfs merge=lfs -text
30
- *.sam filter=lfs diff=lfs merge=lfs -text
31
- *.raw filter=lfs diff=lfs merge=lfs -text
32
- # Audio files - compressed
33
- *.aac filter=lfs diff=lfs merge=lfs -text
34
- *.flac filter=lfs diff=lfs merge=lfs -text
35
- *.mp3 filter=lfs diff=lfs merge=lfs -text
36
- *.ogg filter=lfs diff=lfs merge=lfs -text
37
- *.wav filter=lfs diff=lfs merge=lfs -text
38
- dataset/lexicon_test.json filter=lfs diff=lfs merge=lfs -text
39
- dataset/lexicon_train.json filter=lfs diff=lfs merge=lfs -text
40
- dataset/lexicon_valid.json filter=lfs diff=lfs merge=lfs -text
41
- dataset/sentence_test.json filter=lfs diff=lfs merge=lfs -text
42
- dataset/sentence_train.json filter=lfs diff=lfs merge=lfs -text
43
- dataset/sentence_valid.json filter=lfs diff=lfs merge=lfs -text
44
- dataset/homograph_test.json filter=lfs diff=lfs merge=lfs -text
45
- dataset/homograph_train.json filter=lfs diff=lfs merge=lfs -text
46
- dataset/homograph_valid.json filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,12 +0,0 @@
1
- # librig2p-nostress - Grapheme-To-Phoneme Dataset
2
-
3
- This dataset contains samples that can be used to train a Grapheme-to-Phoneme system **without** stress information.
4
-
5
- The dataset is derived from the following pre-existing datasets:
6
-
7
- * [LibriSpeech ASR Corpus](https://www.openslr.org/12)
8
- * [LibriSpeech Alignments](https://github.com/CorentinJ/librispeech-alignments)
9
- * [Wikipedia Homograph Disambiguation Data](https://github.com/google/WikipediaHomographData)
10
- * [CMUDict](http://www.speech.cs.cmu.edu/cgi-bin/cmudict)
11
-
12
- This version of the dataset applies a correction to LibriSpeech Alignments phoneme annotations by looking up the pronunciations of known words in CMUDict and replacing them with their CMUDict counterparts only if a perfect unique match is found. This reduces the number of discrepancies between homograph data and LibriSpeech data.
 
 
 
 
 
 
 
 
 
 
 
 
 
dataset/lexicon_train.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:5e6d968b103aa1321b792a5b657842c9f42ee49159ed9b67559f1dba805900d5
3
- size 35212317
 
 
 
 
dataset/sentence_test.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d64cf29efa4363fe62f9ffd1dc76400deefce6ece441aac203000b47e93aab50
3
- size 3321676
 
 
 
 
dataset/sentence_train.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:46cf3fec22db0e9cf4673c2a69c50b8e9ed8428b4a857b34806ba29e0d00bef4
3
- size 217297804
 
 
 
 
dataset/sentence_valid.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:4e3119e843e81e40d2c98dad2cb96c2e14abadcfeef08e9339fd6e5b96a204e5
3
- size 3408130
 
 
 
 
dataset/homograph_valid.json → default/librig2p-nostress-space-cmu-homograph_test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d06ae492e1a337694a14da231fd3e68b9b70beff6dedcb9e1cae4fbf82705cf2
3
- size 697913
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:65b5a8579a2a8e55eb8f89c7e7650f449191cf4f16e761bad6825980cccdde92
3
+ size 87439
default/librig2p-nostress-space-cmu-homograph_train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cdd2928e1c03a0c95e930d4914766005e9ef3ac436bf7fbbde6597b0ccfbee5d
3
+ size 1263591
dataset/lexicon_test.json → default/librig2p-nostress-space-cmu-homograph_valid.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0347606ab149f930db118d647fffd70dfd044fa9e25939b123f06c6aa8de51b4
3
- size 361442
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7188de60ed672d58985946263978a933dc4af1d834ebc5235655b8c2380320bc
3
+ size 83984
dataset/homograph_test.json → default/librig2p-nostress-space-cmu-lexicon_test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b883313fd5e16a7d9d8ca8bd881c877f5ad40876d9b81d165e7e97720903c7ba
3
- size 708678
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddb1e14e4d2d885b6c3bb245c4e00c8f07eaa8ae644821f5c629932accb7da75
3
+ size 62526
default/librig2p-nostress-space-cmu-lexicon_train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32887ee7a35fa0ec99cbf90816f3b2e73c3e8fd4649f70568da1fbd5a161fe6b
3
+ size 5545312
dataset/lexicon_valid.json → default/librig2p-nostress-space-cmu-lexicon_valid.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:86a9d3af31ebbf9c667f51cb9a258b40346c437e100cac17421b275ff4d41a4e
3
- size 358581
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbcdf5928100738a5498b569c8db0106f0953e0605ec0aabb6445d7e25a22f4f
3
+ size 62199
default/librig2p-nostress-space-cmu-sentence_test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9838dc89a8ff612d5c2503443ad3b99ee5df72096f9535353d37a30d3e124b67
3
+ size 398592
dataset/homograph_train.json → default/librig2p-nostress-space-cmu-sentence_train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b45ba7624628bbf9401b22765aa2725aa0eb7d29a14edda81130ceb67cf44e61
3
- size 12543594
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13d25919874d7a6fc1d6c9280a95e1135b3d0bddf4d6adbd18c4b05136dbfc47
3
+ size 24913567
default/librig2p-nostress-space-cmu-sentence_valid.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d5072f600da87e7f560aeab6bacbef67ad5c5c1ab8df57e938ef44178268b37
3
+ size 413698
librig2p-nostress-space-cmu.py DELETED
@@ -1,84 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2021 Artem Ploujnikov
3
-
4
-
5
- # Lint as: python3
6
- import json
7
-
8
- import datasets
9
-
10
- _DESCRIPTION = """\
11
- Grapheme-to-Phoneme training, validation and test sets
12
- """
13
-
14
- _BASE_URL = "https://huggingface.co/datasets/flexthink/librig2p-nostress-space-cmu/resolve/main/dataset"
15
- _HOMEPAGE_URL = "https://huggingface.co/datasets/flexthink/librig2p-nostress-space-cmu"
16
- _NA = "N/A"
17
- _SPLIT_TYPES = ["train", "valid", "test"]
18
- _DATA_TYPES = ["lexicon", "sentence", "homograph"]
19
- _SPLITS = [
20
- f"{data_type}_{split_type}"
21
- for data_type in _DATA_TYPES
22
- for split_type in _SPLIT_TYPES
23
- ]
24
-
25
-
26
- class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
27
- def __init__(self, base_url=None, splits=None, *args, **kwargs):
28
- super().__init__(*args, **kwargs)
29
- self.base_url = base_url or _BASE_URL
30
- self.splits = splits or _SPLITS
31
-
32
- def _info(self):
33
- return datasets.DatasetInfo(
34
- description=_DESCRIPTION,
35
- features=datasets.Features(
36
- {
37
- "sample_id": datasets.Value("string"),
38
- "speaker_id": datasets.Value("string"),
39
- "origin": datasets.Value("string"),
40
- "char": datasets.Value("string"),
41
- "phn": datasets.Sequence(datasets.Value("string")),
42
- "homograph": datasets.Value("string"),
43
- "homograph_wordid": datasets.Value("string"),
44
- "homograph_char_start": datasets.Value("int32"),
45
- "homograph_char_end": datasets.Value("int32"),
46
- "homograph_phn_start": datasets.Value("int32"),
47
- "homograph_phn_end": datasets.Value("int32"),
48
- },
49
- ),
50
- supervised_keys=None,
51
- homepage=_HOMEPAGE_URL,
52
- )
53
-
54
- def _get_url(self, split):
55
- return f"{self.base_url}/{split}.json"
56
-
57
- def _split_generator(self, dl_manager, split):
58
- url = self._get_url(split)
59
- path = dl_manager.download_and_extract(url)
60
- return datasets.SplitGenerator(
61
- name=split, gen_kwargs={"datapath": path, "datatype": split},
62
- )
63
-
64
- def _split_generators(self, dl_manager):
65
- return [self._split_generator(dl_manager, split) for split in self.splits]
66
-
67
- def _generate_examples(self, datapath, datatype):
68
- with open(datapath, encoding="utf-8") as f:
69
- data = json.load(f)
70
- for sentence_counter, (sample_id, item) in enumerate(data.items()):
71
- resp = {
72
- "sample_id": sample_id,
73
- "speaker_id": str(item.get("speaker_id") or _NA),
74
- "origin": item["origin"],
75
- "char": item["char"],
76
- "phn": item["phn"],
77
- "homograph": item.get("homograph", _NA),
78
- "homograph_wordid": item.get("homograph_wordid", _NA),
79
- "homograph_char_start": item.get("homograph_char_start", 0),
80
- "homograph_char_end": item.get("homograph_char_end", 0),
81
- "homograph_phn_start": item.get("homograph_phn_start", 0),
82
- "homograph_phn_end": item.get("homograph_phn_end", 0),
83
- }
84
- yield sentence_counter, resp