parquet-converter committed on
Commit
fcf3dc6
1 Parent(s): 47638cc

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,43 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bin.* filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.onnx filter=lfs diff=lfs merge=lfs -text
14
- *.ot filter=lfs diff=lfs merge=lfs -text
15
- *.parquet filter=lfs diff=lfs merge=lfs -text
16
- *.pb filter=lfs diff=lfs merge=lfs -text
17
- *.pt filter=lfs diff=lfs merge=lfs -text
18
- *.pth filter=lfs diff=lfs merge=lfs -text
19
- *.rar filter=lfs diff=lfs merge=lfs -text
20
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
- *.tar.* filter=lfs diff=lfs merge=lfs -text
22
- *.tflite filter=lfs diff=lfs merge=lfs -text
23
- *.tgz filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
28
- # Audio files - uncompressed
29
- *.pcm filter=lfs diff=lfs merge=lfs -text
30
- *.sam filter=lfs diff=lfs merge=lfs -text
31
- *.raw filter=lfs diff=lfs merge=lfs -text
32
- # Audio files - compressed
33
- *.aac filter=lfs diff=lfs merge=lfs -text
34
- *.flac filter=lfs diff=lfs merge=lfs -text
35
- *.mp3 filter=lfs diff=lfs merge=lfs -text
36
- *.ogg filter=lfs diff=lfs merge=lfs -text
37
- *.wav filter=lfs diff=lfs merge=lfs -text
38
- dataset/sentence_valid.json filter=lfs diff=lfs merge=lfs -text
39
- dataset/lexicon_test.json filter=lfs diff=lfs merge=lfs -text
40
- dataset/lexicon_train.json filter=lfs diff=lfs merge=lfs -text
41
- dataset/lexicon_valid.json filter=lfs diff=lfs merge=lfs -text
42
- dataset/sentence_test.json filter=lfs diff=lfs merge=lfs -text
43
- dataset/sentence_train.json filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,8 +0,0 @@
1
- # librig2p-nostress - Grapheme-To-Phoneme Dataset
2
-
3
- This dataset contains samples that can be used to train a Grapheme-to-Phoneme system **without** stress information.
4
-
5
- The dataset is derived from the following pre-existing datasets:
6
-
7
- * [LibriSpeech ASR Corpus](https://www.openslr.org/12)
8
- * [LibriSpeech Alignments](https://github.com/CorentinJ/librispeech-alignments)
 
 
 
 
 
 
 
 
 
dataset/sentence_train.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:764f4d65fa53102f7a08df04b6009c23bc8b2a43901f405e5a944378977d0b44
3
- size 179512436
 
 
 
 
dataset/lexicon_valid.json → default/librig2p-nostress-lexicon_test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cafad66ba83634303c0e139db060745043b0e1c54b4f9c4cb9fec34d3e65261a
3
- size 358546
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f513bcf524a14f4ba947cb3232b823a65f532633c6c5bc4f389bfdd900ff7133
3
+ size 56294
dataset/sentence_test.json → default/librig2p-nostress-lexicon_train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d0c5d98a3fc0561c796b39dfa3b93d7c007c1fb5a17531536f3706936936edaf
3
- size 2765822
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55746b10172e8f6460c6b9cdc8113084167920581542efdaec0655deb4a52e1f
3
+ size 5219261
dataset/lexicon_test.json → default/librig2p-nostress-lexicon_valid.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:134d35619767566a61972c9c467f751553c1424471416d5ccd02be2ab7728070
3
- size 361373
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc783f58b887418321dadfd580eace5e7f1deed931e1d063e19d095ab2d0f5fd
3
+ size 55965
dataset/sentence_valid.json → default/librig2p-nostress-sentence_test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7471a443af29fc90350debaa3d90d03cd0e4cefe9807a4b9312577c79613776d
3
- size 2833542
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7493af3e410a9b7ddd40dc1abefeba4e6a5938db36b5bd790603aef40331e2be
3
+ size 360885
dataset/lexicon_train.json → default/librig2p-nostress-sentence_train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:02bdbd4b798eeaeeee7d5f2a39d67167cb28a81d04b95b9f87f99fabb880ea01
3
- size 35208912
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e273b372490c9ac212d842bd9317ada25e5e02b5aac27279b7b5740a367ce92
3
+ size 23900089
default/librig2p-nostress-sentence_valid.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f8daabc0b88fa3c7879dff40b788a06ddbfaa8d725eddef231a395b3973bd28
3
+ size 371836
librig2p-nostress.py DELETED
@@ -1,75 +0,0 @@
1
# coding=utf-8
# Copyright 2021 Artem Ploujnikov


# Lint as: python3
import json

import datasets

_DESCRIPTION = """
Grapheme-to-Phoneme training, validation and test sets
"""

_BASE_URL = "https://huggingface.co/datasets/flexthink/librig2p-nostress/resolve/main/dataset"
_HOMEPAGE_URL = "https://huggingface.co/datasets/flexthink/librig2p-nostress"
_NA = "N/A"
_SPLIT_TYPES = ["train", "valid", "test"]
_DATA_TYPES = ["lexicon", "sentence"]
_SPLITS = [
    f"{data_type}_{split_type}"
    for data_type in _DATA_TYPES
    for split_type in _SPLIT_TYPES]


class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
    """Dataset builder for the librig2p-nostress grapheme-to-phoneme data.

    Each split is a single JSON file fetched from ``base_url``; every entry
    maps an item id to a record with a character sequence ("char") and its
    phoneme sequence ("phn").
    """

    def __init__(self, base_url=None, splits=None, *args, **kwargs):
        """Initialize the builder.

        Args:
            base_url: optional override for the download location;
                defaults to the module-level ``_BASE_URL``.
            splits: optional list of split names to load;
                defaults to the module-level ``_SPLITS``.
        """
        super().__init__(*args, **kwargs)
        # Fall back to module-level defaults when no override is given.
        self.base_url = base_url or _BASE_URL
        self.splits = splits or _SPLITS

    def _info(self):
        """Return static dataset metadata (features, description, homepage)."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "origin": datasets.Value("string"),
                "char": datasets.Value("string"),
                "phn": datasets.Sequence(datasets.Value("string")),
            },
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
        )

    def _get_url(self, split):
        """Build the download URL for one split's JSON file."""
        return f'{self.base_url}/{split}.json'

    def _split_generator(self, dl_manager, split):
        """Download a single split and wrap it in a SplitGenerator."""
        split_url = self._get_url(split)
        local_path = dl_manager.download_and_extract(split_url)
        return datasets.SplitGenerator(
            name=split,
            gen_kwargs={"datapath": local_path, "datatype": split},
        )

    def _split_generators(self, dl_manager):
        """Create one SplitGenerator per configured split."""
        return [self._split_generator(dl_manager, name) for name in self.splits]

    def _generate_examples(self, datapath, datatype):
        """Yield (key, example) pairs parsed from one split's JSON file."""
        with open(datapath, encoding="utf-8") as json_file:
            entries = json.load(json_file)

        for index, (item_id, item) in enumerate(entries.items()):
            yield index, {
                "id": item_id,
                # A missing or falsy speaker_id is normalized to "N/A".
                "speaker_id": str(item.get("speaker_id") or _NA),
                "origin": item["origin"],
                "char": item["char"],
                "phn": item["phn"],
            }