asr_datasets = {'LibriSpeech-Test-Clean': 'A clean, high-quality test set of the LibriSpeech dataset, used for ASR testing.',
                'LibriSpeech-Test-Other' : 'A more challenging, noisier test set of the LibriSpeech dataset for ASR testing.',
                'Common-Voice-15-En-Test': 'Test set from the Common Voice project, which is a crowd-sourced, multilingual speech dataset.',
                'Peoples-Speech-Test'    : 'A large-scale, open-source speech recognition dataset with diverse accents and domains.',
                'GigaSpeech-Test'        : 'A large-scale ASR dataset with diverse audio sources such as podcasts and interviews.',
                'Earnings21-Test'        : 'ASR test dataset focused on earnings calls from 2021, with professional speech and financial jargon.',
                'Earnings22-Test'        : 'Similar to Earnings21, but covering earnings calls from 2022.',
                'Tedlium3-Test'          : 'A test set derived from TED talks, covering diverse speakers and topics.',
                'Tedlium3-Long-form-Test': 'A long-form version of the TED-LIUM test set with extended audio samples. Long audio poses challenges for existing fusion methods, but this set provides a benchmark for future development.',
                'IMDA-Part1-ASR-Test'    : 'Speech recognition test data from the IMDA NSC project, Part 1.',
                'IMDA-Part2-ASR-Test'    : 'Speech recognition test data from the IMDA NSC project, Part 2.'
                }

sqa_datasets = {'CN-College-Listen-MCQ-Test': 'Chinese College English Listening Test, with multiple-choice questions.', 
                'DREAM-TTS-MCQ-Test': 'DREAM dataset for spoken question-answering, derived from textual data and synthesized speech.', 
                'SLUE-P2-SQA5-Test': 'Spoken Language Understanding Evaluation (SLUE) dataset, part 2, focused on QA tasks.', 
                'Public-SG-Speech-QA-Test': 'Public dataset for speech-based question answering, gathered from Singapore.', 
                'Spoken-Squad-Test': 'Spoken SQuAD dataset, based on the textual SQuAD dataset, converted into audio.'
                }

si_datasets = {'OpenHermes-Audio-Test': 'Test set for spoken instruction following, synthesized from the OpenHermes dataset.', 
               'ALPACA-Audio-Test': 'Spoken version of the ALPACA dataset, used for evaluating instruction following in audio.'
               }

ac_datasets = {
    'WavCaps-Test': 'WavCaps is a dataset for testing audio captioning, where models generate textual descriptions of audio clips.', 
    'AudioCaps-Test': 'AudioCaps dataset, used for generating captions from general audio events.'
}

asqa_datasets = {
    'Clotho-AQA-Test': 'Clotho dataset adapted for audio-based question answering, containing audio clips and questions.', 
    'WavCaps-QA-Test': 'Question-answering test dataset derived from WavCaps, focusing on audio content.', 
    'AudioCaps-QA-Test': 'AudioCaps adapted for question-answering tasks, using audio events as input for Q&A.'
}

er_datasets = {
    'IEMOCAP-Emotion-Test': 'Emotion recognition test data from the IEMOCAP dataset, focusing on identifying emotions in speech.', 
    'MELD-Sentiment-Test': 'Sentiment recognition from speech using the MELD dataset, classifying positive, negative, or neutral sentiments.', 
    'MELD-Emotion-Test': 'Emotion classification in speech using MELD, detecting specific emotions like happiness, anger, etc.'
}

ar_datasets = {
    'VoxCeleb-Accent-Test': 'Test dataset for accent recognition, based on VoxCeleb, a large speaker identification dataset.'
}

gr_datasets = {
    'VoxCeleb-Gender-Test': 'Test dataset for gender classification, also derived from VoxCeleb.', 
    'IEMOCAP-Gender-Test': 'Gender classification based on the IEMOCAP dataset.'
}

spt_datasets = {
    'Covost2-EN-ID-test': 'Covost 2 dataset for speech translation from English to Indonesian.',
    'Covost2-EN-ZH-test': 'Covost 2 dataset for speech translation from English to Chinese.',
    'Covost2-EN-TA-test': 'Covost 2 dataset for speech translation from English to Tamil.',
    'Covost2-ID-EN-test': 'Covost 2 dataset for speech translation from Indonesian to English.',
    'Covost2-ZH-EN-test': 'Covost 2 dataset for speech translation from Chinese to English.',
    'Covost2-TA-EN-test': 'Covost 2 dataset for speech translation from Tamil to English.'
}

cnasr_datasets = {
    'Aishell-ASR-ZH-Test': 'ASR test dataset for Mandarin Chinese, based on the Aishell dataset.'
}
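
# Hedged convenience sketch (not part of the original file): the per-task
# dictionaries above can be gathered into a single registry for iteration,
# e.g. when building one leaderboard tab per task. The task keys used here
# are assumptions, not official task names.
all_task_datasets = {
    'asr_english'         : asr_datasets,
    'speech_qa'           : sqa_datasets,
    'speech_instruction'  : si_datasets,
    'audio_captioning'    : ac_datasets,
    'audio_scene_qa'      : asqa_datasets,
    'emotion_recognition' : er_datasets,
    'accent_recognition'  : ar_datasets,
    'gender_recognition'  : gr_datasets,
    'speech_translation'  : spt_datasets,
    'asr_mandarin'        : cnasr_datasets,
}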

metrics = {
    'wer': 'Word Error Rate (WER), a common metric for ASR evaluation. (The lower, the better.)',
    'llama3_70b_judge_binary': 'Binary evaluation using the LLAMA3-70B model, for tasks requiring a binary outcome. (Scores of 0-1 rescaled to 0-100.)',
    'llama3_70b_judge': 'General evaluation using the LLAMA3-70B model, typically scoring based on subjective judgments. (Scores of 0-5 rescaled to 0-100.)',
    'meteor': 'METEOR, a metric used for evaluating text generation, often used in translation or summarization tasks. (Sensitive to output length.)',
    'bleu': 'BLEU (Bilingual Evaluation Understudy), another text generation evaluation metric, commonly used in machine translation. (Sensitive to output length.)',
}

metrics_info = {
    'wer': 'Word Error Rate (WER) - The Lower, the better.',
    'llama3_70b_judge_binary': 'Model-as-a-Judge Performance. Using LLAMA-3-70B. Scale from 0-100. The higher, the better.',
    'llama3_70b_judge': 'Model-as-a-Judge Performance. Using LLAMA-3-70B. Scale from 0-100. The higher, the better.',
    'meteor': 'METEOR Score. The higher, the better.',
    'bleu': 'BLEU Score. The higher, the better.',
}
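
# Hedged usage sketch: one plausible way to surface `metrics_info` as hover
# or caption text for a metric column in the leaderboard UI. The fallback
# string is an assumption.
def metric_tooltip(metric_name: str) -> str:
    """Return the human-readable description for a metric key, if known."""
    return metrics_info.get(metric_name, 'No description available for this metric.')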

dataname_column_rename_in_table = {
    'librispeech_test_clean' : 'LibriSpeech-Clean',
    'librispeech_test_other' : 'LibriSpeech-Other',
    'common_voice_15_en_test': 'CommonVoice-15-EN',
    'peoples_speech_test'    : 'Peoples-Speech',
    'gigaspeech_test'        : 'GigaSpeech-1',
    'earnings21_test'        : 'Earnings-21',
    'earnings22_test'        : 'Earnings-22',
    'tedlium3_test'          : 'TED-LIUM-3',
    'tedlium3_long_form_test': 'TED-LIUM-3-Long',
    'aishell_asr_zh_test'    : 'Aishell-ASR-ZH',
    'covost2_en_id_test'     : 'Covost2-EN-ID',
    'covost2_en_zh_test'     : 'Covost2-EN-ZH',
    'covost2_en_ta_test'     : 'Covost2-EN-TA',
    'covost2_id_en_test'     : 'Covost2-ID-EN',
    'covost2_zh_en_test'     : 'Covost2-ZH-EN',
    'covost2_ta_en_test'     : 'Covost2-TA-EN',
    'cn_college_listen_mcq_test': 'CN-College-Listen-MCQ',
    'dream_tts_mcq_test'    : 'DREAM-TTS-MCQ',
    'slue_p2_sqa5_test'     : 'SLUE-P2-SQA5',
    'public_sg_speech_qa_test': 'Public-SG-Speech-QA',
    'spoken_squad_test'     : 'Spoken-SQuAD',
    'openhermes_audio_test' : 'OpenHermes-Audio',
    'alpaca_audio_test'     : 'ALPACA-Audio',
    'wavcaps_test'          : 'WavCaps',
    'audiocaps_test'        : 'AudioCaps',
    'clotho_aqa_test'       : 'Clotho-AQA',
    'wavcaps_qa_test'       : 'WavCaps-QA',
    'audiocaps_qa_test'     : 'AudioCaps-QA',
    'voxceleb_accent_test'  : 'VoxCeleb-Accent',
    'voxceleb_gender_test'  : 'VoxCeleb-Gender',
    'iemocap_gender_test'   : 'IEMOCAP-Gender',
    'iemocap_emotion_test'  : 'IEMOCAP-Emotion',
    'meld_sentiment_test'   : 'MELD-Sentiment',
    'meld_emotion_test'     : 'MELD-Emotion',
}
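
# Hedged usage sketch (assumes pandas is available and a hypothetical
# `results_df` whose columns use the raw dataset keys): the map above is
# presumably applied to rename result columns into display-friendly names
# before rendering the leaderboard table.
import pandas as pd

def rename_dataset_columns(results_df: pd.DataFrame) -> pd.DataFrame:
    """Return a copy of the results table with display-friendly column names."""
    return results_df.rename(columns=dataname_column_rename_in_table)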