Sasha Luccioni committed on
Commit fe550f4
Parent: 9270e24

Eval metadata batch 3: Reddit, Rotten Tomatoes, SemEval 2010, Sentiment 140, SMS Spam, Snips, SQuAD, SQuAD v2, Timit ASR (#4337)


* Eval metadata batch 3: Quora, Reddit, Rotten Tomatoes, SemEval 2010, Sentiment 140, SMS Spam, Snips, SQuAD, SQuAD v2, Timit ASR

* Update datasets/quora/README.md

Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>

* Update README.md

removing ROUGE args

* Update datasets/rotten_tomatoes/README.md

Co-authored-by: lewtun <lewis.c.tunstall@gmail.com>

* Update datasets/rotten_tomatoes/README.md

Co-authored-by: lewtun <lewis.c.tunstall@gmail.com>

* Update datasets/squad/README.md

Co-authored-by: lewtun <lewis.c.tunstall@gmail.com>

* Update datasets/squad_v2/README.md

Co-authored-by: lewtun <lewis.c.tunstall@gmail.com>

* Update datasets/squad/README.md

Co-authored-by: lewtun <lewis.c.tunstall@gmail.com>

* Update datasets/squad_v2/README.md

Co-authored-by: lewtun <lewis.c.tunstall@gmail.com>

* Update datasets/squad_v2/README.md

Co-authored-by: lewtun <lewis.c.tunstall@gmail.com>

* Update README.md

removing eval for quora

Co-authored-by: sashavor <sasha.luccioni@huggingface.co>
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
Co-authored-by: lewtun <lewis.c.tunstall@gmail.com>

Commit from https://github.com/huggingface/datasets/commit/8ccf58b77343f323ba6654250f88b69699a57b8e

Files changed (1)
README.md +57 -42
README.md CHANGED
@@ -18,6 +18,21 @@ task_categories:
 - automatic-speech-recognition
 task_ids: []
 paperswithcode_id: timit
+train-eval-index:
+- config: clean
+  task: automatic-speech-recognition
+  task_id: speech_recognition
+  splits:
+    train_split: train
+    eval_split: test
+  col_mapping:
+    file: path
+    text: text
+  metrics:
+  - type: wer
+    name: WER
+  - type: cer
+    name: CER
 ---
 
 # Dataset Card for timit_asr
@@ -84,50 +99,50 @@ A typical data point comprises the path to the audio file, usually called `file`
 
 ```
 {
- 'file': '/data/TRAIN/DR4/MMDM0/SI681.WAV',
+ 'file': '/data/TRAIN/DR4/MMDM0/SI681.WAV',
  'audio': {'path': '/data/TRAIN/DR4/MMDM0/SI681.WAV',
  'array': array([-0.00048828, -0.00018311, -0.00137329, ..., 0.00079346, 0.00091553, 0.00085449], dtype=float32),
  'sampling_rate': 16000},
- 'text': 'Would such an act of refusal be useful?',
- 'phonetic_detail': [{'start': '0', 'stop': '1960', 'utterance': 'h#'},
- {'start': '1960', 'stop': '2466', 'utterance': 'w'},
- {'start': '2466', 'stop': '3480', 'utterance': 'ix'},
- {'start': '3480', 'stop': '4000', 'utterance': 'dcl'},
- {'start': '4000', 'stop': '5960', 'utterance': 's'},
- {'start': '5960', 'stop': '7480', 'utterance': 'ah'},
- {'start': '7480', 'stop': '7880', 'utterance': 'tcl'},
- {'start': '7880', 'stop': '9400', 'utterance': 'ch'},
- {'start': '9400', 'stop': '9960', 'utterance': 'ix'},
- {'start': '9960', 'stop': '10680', 'utterance': 'n'},
- {'start': '10680', 'stop': '13480', 'utterance': 'ae'},
- {'start': '13480', 'stop': '15680', 'utterance': 'kcl'},
- {'start': '15680', 'stop': '15880', 'utterance': 't'},
- {'start': '15880', 'stop': '16920', 'utterance': 'ix'},
- {'start': '16920', 'stop': '18297', 'utterance': 'v'},
- {'start': '18297', 'stop': '18882', 'utterance': 'r'},
- {'start': '18882', 'stop': '19480', 'utterance': 'ix'},
- {'start': '19480', 'stop': '21723', 'utterance': 'f'},
- {'start': '21723', 'stop': '22516', 'utterance': 'y'},
- {'start': '22516', 'stop': '24040', 'utterance': 'ux'},
- {'start': '24040', 'stop': '25190', 'utterance': 'zh'},
- {'start': '25190', 'stop': '27080', 'utterance': 'el'},
- {'start': '27080', 'stop': '28160', 'utterance': 'bcl'},
- {'start': '28160', 'stop': '28560', 'utterance': 'b'},
- {'start': '28560', 'stop': '30120', 'utterance': 'iy'},
- {'start': '30120', 'stop': '31832', 'utterance': 'y'},
- {'start': '31832', 'stop': '33240', 'utterance': 'ux'},
- {'start': '33240', 'stop': '34640', 'utterance': 's'},
- {'start': '34640', 'stop': '35968', 'utterance': 'f'},
- {'start': '35968', 'stop': '37720', 'utterance': 'el'},
- {'start': '37720', 'stop': '39920', 'utterance': 'h#'}],
- 'word_detail': [{'start': '1960', 'stop': '4000', 'utterance': 'would'},
- {'start': '4000', 'stop': '9400', 'utterance': 'such'},
- {'start': '9400', 'stop': '10680', 'utterance': 'an'},
- {'start': '10680', 'stop': '15880', 'utterance': 'act'},
- {'start': '15880', 'stop': '18297', 'utterance': 'of'},
- {'start': '18297', 'stop': '27080', 'utterance': 'refusal'},
- {'start': '27080', 'stop': '30120', 'utterance': 'be'},
- {'start': '30120', 'stop': '37720', 'utterance': 'useful'}],
+ 'text': 'Would such an act of refusal be useful?',
+ 'phonetic_detail': [{'start': '0', 'stop': '1960', 'utterance': 'h#'},
+ {'start': '1960', 'stop': '2466', 'utterance': 'w'},
+ {'start': '2466', 'stop': '3480', 'utterance': 'ix'},
+ {'start': '3480', 'stop': '4000', 'utterance': 'dcl'},
+ {'start': '4000', 'stop': '5960', 'utterance': 's'},
+ {'start': '5960', 'stop': '7480', 'utterance': 'ah'},
+ {'start': '7480', 'stop': '7880', 'utterance': 'tcl'},
+ {'start': '7880', 'stop': '9400', 'utterance': 'ch'},
+ {'start': '9400', 'stop': '9960', 'utterance': 'ix'},
+ {'start': '9960', 'stop': '10680', 'utterance': 'n'},
+ {'start': '10680', 'stop': '13480', 'utterance': 'ae'},
+ {'start': '13480', 'stop': '15680', 'utterance': 'kcl'},
+ {'start': '15680', 'stop': '15880', 'utterance': 't'},
+ {'start': '15880', 'stop': '16920', 'utterance': 'ix'},
+ {'start': '16920', 'stop': '18297', 'utterance': 'v'},
+ {'start': '18297', 'stop': '18882', 'utterance': 'r'},
+ {'start': '18882', 'stop': '19480', 'utterance': 'ix'},
+ {'start': '19480', 'stop': '21723', 'utterance': 'f'},
+ {'start': '21723', 'stop': '22516', 'utterance': 'y'},
+ {'start': '22516', 'stop': '24040', 'utterance': 'ux'},
+ {'start': '24040', 'stop': '25190', 'utterance': 'zh'},
+ {'start': '25190', 'stop': '27080', 'utterance': 'el'},
+ {'start': '27080', 'stop': '28160', 'utterance': 'bcl'},
+ {'start': '28160', 'stop': '28560', 'utterance': 'b'},
+ {'start': '28560', 'stop': '30120', 'utterance': 'iy'},
+ {'start': '30120', 'stop': '31832', 'utterance': 'y'},
+ {'start': '31832', 'stop': '33240', 'utterance': 'ux'},
+ {'start': '33240', 'stop': '34640', 'utterance': 's'},
+ {'start': '34640', 'stop': '35968', 'utterance': 'f'},
+ {'start': '35968', 'stop': '37720', 'utterance': 'el'},
+ {'start': '37720', 'stop': '39920', 'utterance': 'h#'}],
+ 'word_detail': [{'start': '1960', 'stop': '4000', 'utterance': 'would'},
+ {'start': '4000', 'stop': '9400', 'utterance': 'such'},
+ {'start': '9400', 'stop': '10680', 'utterance': 'an'},
+ {'start': '10680', 'stop': '15880', 'utterance': 'act'},
+ {'start': '15880', 'stop': '18297', 'utterance': 'of'},
+ {'start': '18297', 'stop': '27080', 'utterance': 'refusal'},
+ {'start': '27080', 'stop': '30120', 'utterance': 'be'},
+ {'start': '30120', 'stop': '37720', 'utterance': 'useful'}],
 
  'dialect_region': 'DR4',
  'sentence_type': 'SI',
@@ -164,7 +179,7 @@ The speech material has been subdivided into portions for training and
 testing. The default train-test split will be made available on data download.
 
 The test data alone has a core portion containing 24 speakers, 2 male and 1 female
-from each dialect region. More information about the test set can
+from each dialect region. More information about the test set can
 be found [here](https://catalog.ldc.upenn.edu/docs/LDC93S1/TESTSET.TXT)
 
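
For context on the fields referenced by the new `col_mapping` (`file: path`, `text: text`) and shown in the data-point example above, here is a minimal loading sketch, not part of the commit. It assumes a locally available copy of the LDC TIMIT corpus, since `timit_asr` cannot download the audio itself; the `data_dir` path is a placeholder.

```python
# Minimal sketch (not part of this commit): load timit_asr and inspect the
# columns named in the new col_mapping. The data_dir path is a placeholder
# for a local copy of the LDC TIMIT corpus.
from datasets import load_dataset

timit = load_dataset("timit_asr", data_dir="/path/to/TIMIT")

sample = timit["test"][0]
print(sample["file"])                    # path to the WAV file
print(sample["text"])                    # reference transcription
print(sample["audio"]["sampling_rate"])  # 16000
```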
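The `metrics` entries in the new `train-eval-index` block name WER and CER. As a rough illustration of how those scores are computed, the sketch below uses the `evaluate` library with the sentence from the card's example and a made-up model transcription; it is not part of the commit.

```python
# Sketch: computing the WER and CER metrics declared in train-eval-index
# with the `evaluate` library (requires `pip install evaluate jiwer`).
# The prediction is a hypothetical ASR output, not a real model result.
import evaluate

wer = evaluate.load("wer")
cer = evaluate.load("cer")

references = ["Would such an act of refusal be useful?"]
predictions = ["would such an act of refusal be useful"]  # hypothetical

print("WER:", wer.compute(predictions=predictions, references=references))
print("CER:", cer.compute(predictions=predictions, references=references))
```

As computed here, both metrics are case- and punctuation-sensitive, so any text normalization applied before scoring should be reported alongside the numbers.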