gcjavi committed
Commit 27ad44d
1 Parent(s): d7908ec

file structure changed

README.md CHANGED
@@ -8,19 +8,138 @@ license:
  multilinguality:
  - monolingual
  dataset_info:
- - config_name: config
-   features:
-   - name: path
-     dtype: string
-   - name: audio
-     dtype:
-       audio:
-         sampling_rate: 16000
-   - name: sentence
-     dtype: string
-   - name: speaker_id
-     dtype: string
+ - config_name: config
+   features:
+   - name: audio_id
+     dtype: string
+   - name: audio
+     dtype:
+       audio:
+         sampling_rate: 16000
+   - name: sentence
+     dtype: string
  ---
  
  # Dataset Card for [Dataset Name]
+ 
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+ 
+ ## Dataset Description
+ 
+ - **Homepage:**
+ - **Repository:**
+ - **Paper:**
+ - **Leaderboard:**
+ - **Point of Contact:**
+ 
+ ### Dataset Summary
+ 
+ [More Information Needed]
+ 
+ ### Supported Tasks and Leaderboards
+ 
+ [More Information Needed]
+ 
+ ### Languages
+ 
+ [More Information Needed]
+ 
+ ## Dataset Structure
+ 
+ ### Data Instances
+ 
+ [More Information Needed]
+ 
+ ### Data Fields
+ 
+ [More Information Needed]
+ 
+ ### Data Splits
+ 
+ [More Information Needed]
+ 
+ ## Dataset Creation
+ 
+ ### Curation Rationale
+ 
+ [More Information Needed]
+ 
+ ### Source Data
+ 
+ #### Initial Data Collection and Normalization
+ 
+ [More Information Needed]
+ 
+ #### Who are the source language producers?
+ 
+ [More Information Needed]
+ 
+ ### Annotations
+ 
+ #### Annotation process
+ 
+ [More Information Needed]
+ 
+ #### Who are the annotators?
+ 
+ [More Information Needed]
+ 
+ ### Personal and Sensitive Information
+ 
+ [More Information Needed]
+ 
+ ## Considerations for Using the Data
+ 
+ ### Social Impact of Dataset
+ 
+ [More Information Needed]
+ 
+ ### Discussion of Biases
+ 
+ [More Information Needed]
+ 
+ ### Other Known Limitations
+ 
+ [More Information Needed]
+ 
+ ## Additional Information
+ 
+ ### Dataset Curators
+ 
+ [More Information Needed]
+ 
+ ### Licensing Information
+ 
+ [More Information Needed]
+ 
+ ### Citation Information
+ 
+ [More Information Needed]
+ 
+ ### Contributions
+ 
+ Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
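For quick verification of the schema change above, here is a minimal sketch of how the new features should surface once the dataset loads (an assumption: the repository resolves publicly and the loading script below runs; the `test` split name comes from that script):

```python
from datasets import load_dataset

# Load the single test split declared by the loading script.
ds = load_dataset("gcjavi/dataviewer-test", split="test")

# Each example should expose the three features declared in the YAML:
# a string "audio_id", a decoded "audio" dict (waveform array plus a
# 16 kHz sampling rate), and the "sentence" transcription.
example = ds[0]
print(example["audio_id"])
print(example["audio"]["sampling_rate"])  # 16000
print(example["sentence"])
```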
test-user.py ADDED
@@ -0,0 +1,125 @@
+ # coding=utf-8
+ # Lint as: python3
+ """test set"""
+
+
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{panayotov2015librispeech,
+   title={Librispeech: an ASR corpus based on public domain audio books},
+   author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
+   booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
+   pages={5206--5210},
+   year={2015},
+   organization={IEEE}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Lorem ipsum
+ """
+
+
+ _BASE_URL = "https://huggingface.co/datasets/gcjavi/dataviewer-test"
+ _DATA_URL = "test.zip"
+ _PROMPTS_URLS = {"test": "test.tsv"}
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ class TestConfig(datasets.BuilderConfig):
+     """Lorem ipsum."""
+
+     def __init__(self, name, **kwargs):
+         # self.language = kwargs.pop("language", None)
+         # self.release_date = kwargs.pop("release_date", None)
+         # self.num_clips = kwargs.pop("num_clips", None)
+         # self.num_speakers = kwargs.pop("num_speakers", None)
+         # self.validated_hr = kwargs.pop("validated_hr", None)
+         # self.total_hr = kwargs.pop("total_hr", None)
+         # self.size_bytes = kwargs.pop("size_bytes", None)
+         # self.size_human = size_str(self.size_bytes)
+         description = (
+             "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor "
+             "incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud "
+             "exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure "
+             "dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. "
+             "Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt "
+             "mollit anim id est laborum."
+         )
+         super(TestConfig, self).__init__(
+             name=name,
+             description=description,
+             **kwargs,
+         )
+
+
+ class TestASR(datasets.GeneratorBasedBuilder):
+     """Lorem ipsum."""
+
+     BUILDER_CONFIGS = [
+         TestConfig(
+             name="test-dataset",
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "audio_id": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "sentence": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_BASE_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Download the audio archive; extract it only when not streaming,
+         # since streaming reads members directly via iter_archive().
+         audio_path = dl_manager.download(_DATA_URL)
+         local_extracted_archive = dl_manager.extract(audio_path) if not dl_manager.is_streaming else None
+         meta_path = dl_manager.download(_PROMPTS_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "meta_path": meta_path["test"],
+                     "audio_files": dl_manager.iter_archive(audio_path),
+                     "local_extracted_archive": local_extracted_archive,
+                 },
+             )
+         ]
+
+     def _generate_examples(self, meta_path, audio_files, local_extracted_archive):
+         """Yields (key, example) pairs, joining transcripts to archive members by file name."""
+         metadata = {}
+         with open(meta_path, encoding="utf-8") as f:
+             next(f)  # skip the header row
+             for row in f:
+                 # Fields are tab-separated and wrapped in double quotes.
+                 audio_id, sentence = (field.strip('"') for field in row.rstrip("\n").split("\t"))
+                 metadata[audio_id] = {"audio_id": audio_id, "sentence": sentence}
+
+         id_ = 0
+         for path, f in audio_files:
+             _, audio_name = os.path.split(path)
+             if audio_name in metadata:
+                 result = dict(metadata[audio_name])
+                 path = os.path.join(local_extracted_archive, "test", path) if local_extracted_archive else path
+                 result["audio"] = {"path": path, "bytes": f.read()}
+                 yield id_, result
+                 id_ += 1
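A script like this can be smoke-tested locally before pushing. A sketch, assuming a `datasets` version that still executes loading scripts and that `test.zip` and `test.tsv` sit next to the script (as the relative `_DATA_URL` and `_PROMPTS_URLS` require):

```python
import datasets

# Non-streaming: downloads and extracts test.zip, then joins against test.tsv.
ds = datasets.load_dataset("./test-user.py", split="test")
print(ds)

# Streaming exercises the iter_archive() branch, where
# local_extracted_archive is None and audio bytes are read from the zip.
stream = datasets.load_dataset("./test-user.py", split="test", streaming=True)
print(next(iter(stream))["audio_id"])
```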
transcript/train.tsv → test.tsv RENAMED
@@ -1,5 +1,5 @@
- "path"	"sentence"	"speaker_id"
- "DSPG_011_16012017_4.98_5.67.wav"	"bos días."	"Speaker1"
- "DSPG_011_16012017_5.85_7.08.wav"	"vamos a dar comezo a sesión"	"Speaker2"
- "DSPG_011_16012017_40.98_41.97.wav"	"abrese a sesión."	"Speaker3"
- "DSPG_011_16012017_43.68_50.25.wav"	"a orde do día está publicada no boletín oficial do parlamento de galicia polo que non procederemos á sua lectura."	"Speaker4"
+ "audio_id"	"sentence"
+ "DSPG_011_16012017_4.98_5.67.wav"	"bos días."
+ "DSPG_011_16012017_5.85_7.08.wav"	"vamos a dar comezo a sesión"
+ "DSPG_011_16012017_40.98_41.97.wav"	"abrese a sesión."
+ "DSPG_011_16012017_43.68_50.25.wav"	"a orde do día está publicada no boletín oficial do parlamento de galicia polo que non procederemos á sua lectura."
data/train.zip → test.zip RENAMED
File without changes