ajyy committed
Commit 87f18a9
1 Parent(s): f134b8b

Upload 5 files

Files changed (5)
  1. MELD_Audio.py +146 -0
  2. README.md +42 -4
  3. dev.csv +0 -0
  4. test.csv +0 -0
  5. train.csv +0 -0
MELD_Audio.py ADDED
@@ -0,0 +1,146 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """MELD_Audio: the audio modality of the Multimodal EmotionLines Dataset (MELD)."""
+
+ import os
+
+ import datasets
+ import pandas as pd
+ from datasets import ClassLabel
+
+ _CITATION = """\
+ @article{poria2018meld,
+   title={MELD: A multimodal multi-party dataset for emotion recognition in conversations},
+   author={Poria, Soujanya and Hazarika, Devamanyu and Majumder, Navonil and Naik, Gautam and Cambria, Erik and Mihalcea, Rada},
+   journal={arXiv preprint arXiv:1810.02508},
+   year={2018}
+ }
+ @article{chen2018emotionlines,
+   title={EmotionLines: An emotion corpus of multi-party conversations},
+   author={Chen, Sheng-Yeh and Hsu, Chao-Chun and Kuo, Chuan-Chun and Ku, Lun-Wei and others},
+   journal={arXiv preprint arXiv:1802.08379},
+   year={2018}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The Multimodal EmotionLines Dataset (MELD) was created by enhancing and extending the EmotionLines dataset.
+ MELD contains the same dialogue instances as EmotionLines, but additionally covers the audio and visual
+ modalities along with text. MELD has more than 1400 dialogues and 13000 utterances from the Friends TV
+ series, with multiple speakers participating in the dialogues. Each utterance in a dialogue is labeled with
+ one of seven emotions -- Anger, Disgust, Sadness, Joy, Neutral, Surprise, or Fear. MELD also carries a
+ sentiment annotation (positive, negative, or neutral) for each utterance.
+ This dataset is modified from https://huggingface.co/datasets/zrr1999/MELD_Text_Audio.
+ The audio was extracted from the MELD MP4 files as single-channel (mono) tracks with a 16 kHz sample rate.
+ """
+
+ # Upstream dataset this loader is adapted from.
+ _HOMEPAGE = "https://huggingface.co/datasets/zrr1999/MELD_Text_Audio"
+
+ _LICENSE = "gpl-3.0"
+
+
+ class MELD_Audio(datasets.GeneratorBasedBuilder):
+     """Dataset builder for the audio modality of MELD."""
+
+     VERSION = datasets.Version("0.0.1")
+     BUILDER_CONFIGS = [  # noqa: RUF012
+         datasets.BuilderConfig(name="MELD_Audio", version=VERSION, description="MELD audio"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "path": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16000),
+                     "emotion": ClassLabel(names=["neutral", "joy", "sadness", "anger", "fear", "disgust", "surprise"]),
+                     "sentiment": ClassLabel(names=["neutral", "positive", "negative"]),
+                 }
+             ),
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # Per-split metadata CSVs carrying utterance text plus emotion/sentiment labels.
+         metadata_dir: dict[str, str] = dl_manager.download_and_extract(
+             {"train": "train_sent_emo.csv", "validation": "dev_sent_emo.csv", "test": "test_sent_emo.csv"}
+         )  # type: ignore # noqa: PGH003
+         data_path: dict[str, str] = dl_manager.download(
+             {
+                 "audios_train": "archive/train.tar.gz",
+                 "audios_validation": "archive/dev.tar.gz",
+                 "audios_test": "archive/test.tar.gz",
+             }
+         )  # type: ignore # noqa: PGH003
+         path_to_clips = "MELD_Audio"
+         # In streaming mode the archives are never extracted to disk; the audio
+         # bytes are instead read member-by-member via iter_archive below.
+         local_extracted_archive: dict[str, str | None] = (
+             dl_manager.extract(data_path)
+             if not dl_manager.is_streaming
+             else {
+                 "audios_train": None,
+                 "audios_validation": None,
+                 "audios_test": None,
+             }
+         )  # type: ignore # noqa: PGH003
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,  # type: ignore # noqa: PGH003
+                 gen_kwargs={
+                     "filepath": metadata_dir["train"],
+                     "split": "train",
+                     "local_extracted_archive": local_extracted_archive["audios_train"],
+                     "audio_files": dl_manager.iter_archive(data_path["audios_train"]),
+                     "path_to_clips": path_to_clips,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,  # type: ignore # noqa: PGH003
+                 gen_kwargs={
+                     "filepath": metadata_dir["validation"],
+                     "split": "validation",
+                     "local_extracted_archive": local_extracted_archive["audios_validation"],
+                     "audio_files": dl_manager.iter_archive(data_path["audios_validation"]),
+                     "path_to_clips": path_to_clips,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,  # type: ignore # noqa: PGH003
+                 gen_kwargs={
+                     "filepath": metadata_dir["test"],
+                     "split": "test",
+                     "local_extracted_archive": local_extracted_archive["audios_test"],
+                     "audio_files": dl_manager.iter_archive(data_path["audios_test"]),
+                     "path_to_clips": path_to_clips,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split, local_extracted_archive, audio_files, path_to_clips):
+         """Yields examples."""
+         metadata_df = pd.read_csv(filepath, sep=",", index_col=0, header=0)
+         # Index the metadata rows by each clip's relative path inside the
+         # archive, e.g. "train/dia0_utt0.flac".
+         metadata = {}
+         for _, row in metadata_df.iterrows():
+             id_ = f"dia{row['Dialogue_ID']}_utt{row['Utterance_ID']}"
+             audio_path = f"{split}/{id_}.flac"
+             metadata[audio_path] = row
+
+         id_ = 0
+         for path, f in audio_files:
+             if path in metadata:
+                 row = metadata[path]
+                 path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
+                 # datasets.Audio expects the raw bytes under the string key "bytes".
+                 audio = {"path": path, "bytes": f.read()}
+                 yield (
+                     id_,
+                     {
+                         "text": row["Utterance"],
+                         "path": path,
+                         "audio": audio,
+                         "emotion": row["Emotion"],
+                         "sentiment": row["Sentiment"],
+                     },
+                 )
+                 id_ += 1
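As a point of reference, below is a minimal sketch of how this builder might be consumed. The repository id "ajyy/MELD_audio" is an assumption (inferred from the committer name), and recent datasets releases require trust_remote_code=True to execute a script-based loader like this one:

import datasets

# "ajyy/MELD_audio" is a hypothetical repo id -- substitute the real one.
# The config name "MELD_Audio" matches BUILDER_CONFIGS in MELD_Audio.py.
dataset = datasets.load_dataset("ajyy/MELD_audio", "MELD_Audio", trust_remote_code=True)

sample = dataset["train"][0]
print(sample["text"], sample["emotion"], sample["sentiment"])
print(sample["audio"]["sampling_rate"])  # 16000, as declared by datasets.Audio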
README.md CHANGED
@@ -1,5 +1,43 @@
  ---
- license: gpl-3.0
- language:
- - en
- ---
+ dataset_info:
+   config_name: MELD_Audio
+   features:
+   - name: text
+     dtype: string
+   - name: path
+     dtype: string
+   - name: audio
+     dtype:
+       audio:
+         sampling_rate: 16000
+   - name: emotion
+     dtype:
+       class_label:
+         names:
+           '0': neutral
+           '1': joy
+           '2': sadness
+           '3': anger
+           '4': fear
+           '5': disgust
+           '6': surprise
+   - name: sentiment
+     dtype:
+       class_label:
+         names:
+           '0': neutral
+           '1': positive
+           '2': negative
+   splits:
+   - name: train
+     num_bytes: 3629722
+     num_examples: 9988
+   - name: validation
+     num_bytes: 411341
+     num_examples: 1108
+   - name: test
+     num_bytes: 945283
+     num_examples: 2610
+   download_size: 7840135137
+   dataset_size: 4986346
+ ---
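The class_label entries above store emotion and sentiment as integer ids in the order listed. A small sketch of converting between ids and names, assuming the dataset has been loaded as in the earlier example:

# Convert between integer label ids and names via the ClassLabel feature.
emotion = dataset["train"].features["emotion"]
print(emotion.int2str(1))       # "joy"
print(emotion.str2int("fear"))  # 4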
dev.csv ADDED
The diff for this file is too large to render. See raw diff
 
test.csv ADDED
The diff for this file is too large to render. See raw diff
 
train.csv ADDED
The diff for this file is too large to render. See raw diff
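The three uploaded CSVs are too large to render in the diff. A quick way to inspect them locally, assuming they share the schema of the *_sent_emo.csv files the loader reads (this column layout is an assumption, not confirmed by the diff):

# Peek at the uploaded metadata; the column names below are assumed to match
# what MELD_Audio.py expects (Utterance, Emotion, Sentiment, Dialogue_ID, Utterance_ID).
import pandas as pd

df = pd.read_csv("train.csv", index_col=0)
print(df.columns.tolist())
print(df[["Utterance", "Emotion", "Sentiment"]].head())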