holylovenia and albertvillanova (HF staff) committed on
Commit
737e980
1 Parent(s): b966160

Convert dataset to Parquet (#3)


- Convert dataset to Parquet (b65b9bb87a0412eb94a659660819060825e74b9f)
- Delete loading script (be547ddd15ecb44291cdef011cdd4d7b99afea54)
- Delete data file (d0ed8f9a70f6b49e86c8f3f1f59f75d3fddf9a00)
- Delete data file (8373177a94199e0dfb50c480f3cef49aae9a88b0)
- Delete legacy dataset_infos.json (68b1bd56ab0e54d5bea449ca5ee170f2aef36ca7)
- Delete data file (fe3d48f37d472dc9dac1c013f0b78106feeee768)
- Delete data file (9df429a31da8e5801d1383fbe98b946003e53339)
- Delete data file (2b0d800a4d17e1448abafa715b7f1a1d63a35cde)
- Delete data file (0e204fafddc7d368bd91417b08588d402aa93547)


Co-authored-by: Albert Villanova <albertvillanova@users.noreply.huggingface.co>
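
After this commit, the dataset is served directly from the Parquet shards under `main/`, so no loading script runs on the user's machine. A minimal sketch of loading the converted dataset, assuming a recent `datasets` release with Parquet-backed Hub dataset support:

```python
from datasets import load_dataset

# Loads the Parquet-backed "main" config added by this commit; the deleted
# ASCEND.py loading script is no longer needed or executed.
ds = load_dataset("CAiRE/ASCEND", "main")
print(ds)                    # DatasetDict with train / test / validation splits
print(ds["train"].features)  # matches the dataset_info block in README.md
```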

ASCEND.py DELETED
@@ -1,139 +0,0 @@
- # coding=utf-8
- # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ Common Voice Dataset"""
-
- from datasets import AutomaticSpeechRecognition
-
-
- import datasets
- import os
- import pandas as pd
-
-
- _CITATION = """\
- @inproceedings{lovenia2021ascend,
-   title = {ASCEND: A Spontaneous Chinese-English Dataset for Code-switching in Multi-turn Conversation},
-   author = {Lovenia, Holy and Cahyawijaya, Samuel and Winata, Genta Indra and Xu, Peng and Yan, Xu and Liu, Zihan and Frieske, Rita and Yu, Tiezheng and Dai, Wenliang and Barezi, Elham J and others},
-   booktitle = {Proceedings of the International Conference on Language Resources and Evaluation, {LREC} 2022, 20-25 June 2022, Lu Palais du Pharo, France},
-   publisher = {European Language Resources Association},
-   year = {2022},
-   pages = {}
- }
- """
-
- _DESCRIPTION = """\
- ASCEND (A Spontaneous Chinese-English Dataset) introduces a high-quality resource of spontaneous multi-turn conversational dialogue Chinese-English code-switching corpus collected in Hong Kong. ASCEND consists of 10.62 hours of spontaneous speech with a total of ~12.3K utterances. The corpus is split into 3 sets: training, validation, and test with a ratio of 8:1:1 while maintaining a balanced gender proportion on each set.
- """
-
- _HOMEPAGE = "https://huggingface.co/datasets/CAiRE/ASCEND"
-
- _URL = "https://huggingface.co/datasets/CAiRE/ASCEND/raw/main/"
- _URLS = {
-     "train": _URL + "train_metadata.csv",
-     "test": _URL + "test_metadata.csv",
-     "validation": _URL + "validation_metadata.csv",
-     "waves": "https://huggingface.co/datasets/CAiRE/ASCEND/resolve/main/waves.tar.bz2",
- }
-
-
- class ASCENDConfig(datasets.BuilderConfig):
-     """BuilderConfig for ASCEND."""
-
-     def __init__(self, name="main", **kwargs):
-         """
-         Args:
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(ASCENDConfig, self).__init__(name, **kwargs)
-
-
- class ASCEND(datasets.GeneratorBasedBuilder):
-     """ASCEND: A Spontaneous Chinese-English Dataset for code-switching. Snapshot date: 5 January 2022."""
-
-     BUILDER_CONFIGS = [
-         ASCENDConfig(
-             name="main",
-             version=datasets.Version("1.0.0", ""),
-             description=_DESCRIPTION,
-         )
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "path": datasets.Value("string"),
-                 "audio": datasets.Audio(sampling_rate=16_000),
-                 "transcription": datasets.Value("string"),
-                 "duration": datasets.Value("float32"),
-                 "language": datasets.Value("string"),
-                 "original_speaker_id": datasets.Value("int64"),
-                 "session_id": datasets.Value("int64"),
-                 "topic": datasets.Value("string"),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-             task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="transcription")],
-         )
-
-     def _split_generators(self, dl_manager):
-         downloaded_files = dl_manager.download_and_extract(_URLS)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "metadata_path": downloaded_files["train"],
-                     "wave_path": downloaded_files["waves"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "metadata_path": downloaded_files["test"],
-                     "wave_path": downloaded_files["waves"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "metadata_path": downloaded_files["validation"],
-                     "wave_path": downloaded_files["waves"],
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, metadata_path, wave_path):
-         print(metadata_path)
-         metadata_df = pd.read_csv(metadata_path)
-
-         for index, row in metadata_df.iterrows():
-             example = {
-                 "id": str(index).zfill(5),
-                 "path": os.path.join(wave_path, row["file_name"]),
-                 "audio": os.path.join(wave_path, row["file_name"]),
-                 "transcription": row["transcription"],
-                 "duration": row["duration"],
-                 "language": row["language"],
-                 "original_speaker_id": row["original_speaker_id"],
-                 "session_id": row["session_id"],
-                 "topic": row["topic"],
-             }
-             yield index, example
ASCEND.py.lock DELETED
File without changes
README.md CHANGED
@@ -22,6 +22,51 @@ pretty_name: 'ASCEND: A Spontaneous Chinese-English Dataset for Code-switching i
  tags:
  - speech-recognition
  - code-switching
+ dataset_info:
+   config_name: main
+   features:
+   - name: id
+     dtype: string
+   - name: path
+     dtype: string
+   - name: audio
+     dtype:
+       audio:
+         sampling_rate: 16000
+   - name: transcription
+     dtype: string
+   - name: duration
+     dtype: float32
+   - name: language
+     dtype: string
+   - name: original_speaker_id
+     dtype: int64
+   - name: session_id
+     dtype: int64
+   - name: topic
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 1014573740.14
+     num_examples: 9869
+   - name: test
+     num_bytes: 106171230.135
+     num_examples: 1315
+   - name: validation
+     num_bytes: 106772517.43
+     num_examples: 1130
+   download_size: 1223536062
+   dataset_size: 1227517487.7050002
+ configs:
+ - config_name: main
+   data_files:
+   - split: train
+     path: main/train-*
+   - split: test
+     path: main/test-*
+   - split: validation
+     path: main/validation-*
+   default: true
  ---

  # Dataset Card for ASCEND
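
The `dataset_info` and `configs` blocks added above are what the Hub and the `datasets` library now read in place of the deleted `dataset_infos.json` and loading script. A small sketch for checking that the declared splits and features resolve as expected (assumes a `datasets` version that understands the `configs` YAML):

```python
from datasets import load_dataset_builder

builder = load_dataset_builder("CAiRE/ASCEND", "main")
# Split sizes and feature types come straight from the README YAML above.
print(builder.info.splits)    # train: 9869, test: 1315, validation: 1130 examples
print(builder.info.features)
```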
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"train": {"description": "ASCEND (A Spontaneous Chinese-English Dataset) introduces a high-quality resource of spontaneous multi-turn conversational dialogue Chinese-English code-switching corpus collected in Hong Kong. ASCEND consists of 10.62 hours of spontaneous speech with a total of ~12.3K utterances. The corpus is split into 3 sets: training, validation, and test with a ratio of 8:1:1 while maintaining a balanced gender proportion on each set.\n", "citation": "@inproceedings{lovenia2021ascend,\n title = {ASCEND: A Spontaneous Chinese-English Dataset for Code-switching in Multi-turn Conversation},\n author = {Lovenia, Holy and Cahyawijaya, Samuel and Winata, Genta Indra and Xu, Peng and Yan, Xu and Liu, Zihan and Frieske, Rita and Yu, Tiezheng and Dai, Wenliang and Barezi, Elham J and others},\n booktitle = {Proceedings of the International Conference on Language Resources and Evaluation, {LREC} 2022, 20-25 June 2022, Lu Palais du Pharo, France},\n publisher = {European Language Resources Association},\n year = {2022},\n pages = {}\n}\n", "homepage": "https://huggingface.co/datasets/CAiRE/ASCEND", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "audio": {"sampling_rate": 16000, "mono": true, "decode": true, "id": null, "_type": "Audio"}, "transcription": {"dtype": "string", "id": null, "_type": "Value"}, "duration": {"dtype": "float32", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "original_speaker_id": {"dtype": "int64", "id": null, "_type": "Value"}, "session_id": {"dtype": "int64", "id": null, "_type": "Value"}, "topic": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "automatic-speech-recognition", "audio_column": "audio", "transcription_column": "transcription"}], "builder_name": "ascend", "config_name": "train", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4316724, "num_examples": 9869, "dataset_name": "ascend"}, "test": {"name": "test", "num_bytes": 559170, "num_examples": 1315, "dataset_name": "ascend"}, "validation": {"name": "validation", "num_bytes": 489562, "num_examples": 1130, "dataset_name": "ascend"}}, "download_checksums": {"https://huggingface.co/datasets/CAiRE/ASCEND/raw/main/train_metadata.csv": {"num_bytes": 1081181, "checksum": "4cbdf90fe9bf53640bfc285e2539b468a6e412daeb17c36a1b5da478cd9f5b29"}, "https://huggingface.co/datasets/CAiRE/ASCEND/raw/main/test_metadata.csv": {"num_bytes": 127658, "checksum": "15689bc1c1a0bc29b250f63221576392b627da9cc1d80e51bb1a422118b9732c"}, "https://huggingface.co/datasets/CAiRE/ASCEND/raw/main/validation_metadata.csv": {"num_bytes": 118552, "checksum": "6e53e362991b23ffa49ed991c6062a51d8f286747f341e566c897c02bee72459"}, "https://huggingface.co/datasets/CAiRE/ASCEND/resolve/main/waves.tar.bz2": {"num_bytes": 929707032, "checksum": "b35cc295f1310535a8e250d534aee0adeb90bccbc027a442cdbef81146894529"}}, "download_size": 931034423, "post_processing_size": null, "dataset_size": 5365456, "size_in_bytes": 936399879}, "validation": {"description": "ASCEND (A Spontaneous Chinese-English Dataset) introduces a high-quality resource of spontaneous multi-turn conversational dialogue Chinese-English code-switching corpus collected in Hong Kong. 
ASCEND consists of 10.62 hours of spontaneous speech with a total of ~12.3K utterances. The corpus is split into 3 sets: training, validation, and test with a ratio of 8:1:1 while maintaining a balanced gender proportion on each set.\n", "citation": "@inproceedings{lovenia2021ascend,\n title = {ASCEND: A Spontaneous Chinese-English Dataset for Code-switching in Multi-turn Conversation},\n author = {Lovenia, Holy and Cahyawijaya, Samuel and Winata, Genta Indra and Xu, Peng and Yan, Xu and Liu, Zihan and Frieske, Rita and Yu, Tiezheng and Dai, Wenliang and Barezi, Elham J and others},\n booktitle = {Proceedings of the International Conference on Language Resources and Evaluation, {LREC} 2022, 20-25 June 2022, Lu Palais du Pharo, France},\n publisher = {European Language Resources Association},\n year = {2022},\n pages = {}\n}\n", "homepage": "https://huggingface.co/datasets/CAiRE/ASCEND", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "audio": {"sampling_rate": 16000, "mono": true, "decode": true, "id": null, "_type": "Audio"}, "transcription": {"dtype": "string", "id": null, "_type": "Value"}, "duration": {"dtype": "float32", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "original_speaker_id": {"dtype": "int64", "id": null, "_type": "Value"}, "session_id": {"dtype": "int64", "id": null, "_type": "Value"}, "topic": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "automatic-speech-recognition", "audio_column": "audio", "transcription_column": "transcription"}], "builder_name": "ascend", "config_name": "validation", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4316724, "num_examples": 9869, "dataset_name": "ascend"}, "test": {"name": "test", "num_bytes": 559170, "num_examples": 1315, "dataset_name": "ascend"}, "validation": {"name": "validation", "num_bytes": 489562, "num_examples": 1130, "dataset_name": "ascend"}}, "download_checksums": {"https://huggingface.co/datasets/CAiRE/ASCEND/raw/main/train_metadata.csv": {"num_bytes": 1081181, "checksum": "4cbdf90fe9bf53640bfc285e2539b468a6e412daeb17c36a1b5da478cd9f5b29"}, "https://huggingface.co/datasets/CAiRE/ASCEND/raw/main/test_metadata.csv": {"num_bytes": 127658, "checksum": "15689bc1c1a0bc29b250f63221576392b627da9cc1d80e51bb1a422118b9732c"}, "https://huggingface.co/datasets/CAiRE/ASCEND/raw/main/validation_metadata.csv": {"num_bytes": 118552, "checksum": "6e53e362991b23ffa49ed991c6062a51d8f286747f341e566c897c02bee72459"}, "https://huggingface.co/datasets/CAiRE/ASCEND/resolve/main/waves.tar.bz2": {"num_bytes": 929707032, "checksum": "b35cc295f1310535a8e250d534aee0adeb90bccbc027a442cdbef81146894529"}}, "download_size": 931034423, "post_processing_size": null, "dataset_size": 5365456, "size_in_bytes": 936399879}, "test": {"description": "ASCEND (A Spontaneous Chinese-English Dataset) introduces a high-quality resource of spontaneous multi-turn conversational dialogue Chinese-English code-switching corpus collected in Hong Kong. ASCEND consists of 10.62 hours of spontaneous speech with a total of ~12.3K utterances. 
The corpus is split into 3 sets: training, validation, and test with a ratio of 8:1:1 while maintaining a balanced gender proportion on each set.\n", "citation": "@inproceedings{lovenia2021ascend,\n title = {ASCEND: A Spontaneous Chinese-English Dataset for Code-switching in Multi-turn Conversation},\n author = {Lovenia, Holy and Cahyawijaya, Samuel and Winata, Genta Indra and Xu, Peng and Yan, Xu and Liu, Zihan and Frieske, Rita and Yu, Tiezheng and Dai, Wenliang and Barezi, Elham J and others},\n booktitle = {Proceedings of the International Conference on Language Resources and Evaluation, {LREC} 2022, 20-25 June 2022, Lu Palais du Pharo, France},\n publisher = {European Language Resources Association},\n year = {2022},\n pages = {}\n}\n", "homepage": "https://huggingface.co/datasets/CAiRE/ASCEND", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "path": {"dtype": "string", "id": null, "_type": "Value"}, "audio": {"sampling_rate": 16000, "mono": true, "decode": true, "id": null, "_type": "Audio"}, "transcription": {"dtype": "string", "id": null, "_type": "Value"}, "duration": {"dtype": "float32", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "original_speaker_id": {"dtype": "int64", "id": null, "_type": "Value"}, "session_id": {"dtype": "int64", "id": null, "_type": "Value"}, "topic": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "automatic-speech-recognition", "audio_column": "audio", "transcription_column": "transcription"}], "builder_name": "ascend", "config_name": "test", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4316724, "num_examples": 9869, "dataset_name": "ascend"}, "test": {"name": "test", "num_bytes": 559170, "num_examples": 1315, "dataset_name": "ascend"}, "validation": {"name": "validation", "num_bytes": 489562, "num_examples": 1130, "dataset_name": "ascend"}}, "download_checksums": {"https://huggingface.co/datasets/CAiRE/ASCEND/raw/main/train_metadata.csv": {"num_bytes": 1081181, "checksum": "4cbdf90fe9bf53640bfc285e2539b468a6e412daeb17c36a1b5da478cd9f5b29"}, "https://huggingface.co/datasets/CAiRE/ASCEND/raw/main/test_metadata.csv": {"num_bytes": 127658, "checksum": "15689bc1c1a0bc29b250f63221576392b627da9cc1d80e51bb1a422118b9732c"}, "https://huggingface.co/datasets/CAiRE/ASCEND/raw/main/validation_metadata.csv": {"num_bytes": 118552, "checksum": "6e53e362991b23ffa49ed991c6062a51d8f286747f341e566c897c02bee72459"}, "https://huggingface.co/datasets/CAiRE/ASCEND/resolve/main/waves.tar.bz2": {"num_bytes": 929707032, "checksum": "b35cc295f1310535a8e250d534aee0adeb90bccbc027a442cdbef81146894529"}}, "download_size": 931034423, "post_processing_size": null, "dataset_size": 5365456, "size_in_bytes": 936399879}}
 
 
waves.tar.bz2 → main/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b35cc295f1310535a8e250d534aee0adeb90bccbc027a442cdbef81146894529
- size 929707032
+ oid sha256:a4c81d2b5ed6124f052089a695972808c16e0ce0c365ec9773c5d1a8fcf043a7
+ size 105756434
main/train-00000-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d66ba76f324e0711b779cfb01ee4e772a24a929e1d77a2063cde0506f75976f
+ size 316735328
main/train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:569f84f771c3637ca8535bd35e10e62feed2c240833534711909a9c04f51e589
+ size 366824932
main/train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa76b7ef4a74ff111fd1d2573d0b69e6b6f901df6054618d8e5658a7a394523e
+ size 327687102
main/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bdec53d2abfd3dd4f0d86a6df4e27e60f20660edc9b66055ae0ef8ec05cf7e2
+ size 106532266
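
The files above are Git LFS pointers; the actual Parquet shards live in LFS storage. To inspect a single shard without materializing the whole dataset, something like the following should work (a sketch assuming `huggingface_hub` and `pyarrow` are installed):

```python
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Fetch one shard (the validation split fits in a single file) and inspect it.
path = hf_hub_download(
    repo_id="CAiRE/ASCEND",
    filename="main/validation-00000-of-00001.parquet",
    repo_type="dataset",
)
table = pq.read_table(path)
print(table.num_rows)  # 1130 validation examples
print(table.schema)    # audio is stored as a {bytes, path} struct alongside the metadata columns
```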
speakers.csv DELETED
@@ -1,24 +0,0 @@
- speaker_id,age,gender,split,years_in_english_study,english_score
- 1,23,female,train,17,6
- 2,22,male,train,13,6
- 3,25,male,test,15,6
- 4,24,male,train,10,7.5
- 5,24,female,train,12,7
- 6,30,female,train,12,7
- 7,23,female,val,17,7
- 8,23,female,train,15,6
- 9,27,male,train,12,6
- 10,23,male,train,12,6
- 11,23,female,train,16,8
- 12,23,male,val,10,5.5
- 13,23,female,train,16,7
- 14,22,female,train,12,6.5
- 15,19,female,train,8,7
- 16,26,male,train,15,6.5
- 17,24,female,test,11,6.5
- 18,23,male,train,5,6.5
- 19,26,male,train,16,7
- 20,26,female,val,18,7.5
- 21,23,female,train,15,6.5
- 24,27,male,train,17,6
- 26,23,female,train,10,7
test_metadata.csv DELETED
The diff for this file is too large to render. See raw diff
 
train_metadata.csv DELETED
The diff for this file is too large to render. See raw diff
 
validation_metadata.csv DELETED
The diff for this file is too large to render. See raw diff