Datasets:
Tasks: Audio Classification
Sub-tasks: audio-emotion-recognition
Languages: English
Size: 1K<n<10K
License:
Upload ravdess_preprocessor.py

ravdess_preprocessor.py (new file, +234 lines)
import argparse
import os
import re
from collections import OrderedDict
from pathlib import Path

import pandas as pd
import torchaudio
from tqdm import tqdm

# RAVDESS filenames encode their metadata as hyphen-separated two-digit codes.
# Each code is a 1-based index into the corresponding list below.
feat_dict = OrderedDict()
feat_dict['Modality'] = ['full-AV', 'video-only', 'audio-only']
feat_dict['Vocal channel'] = ['speech', 'song']
feat_dict['Emotion'] = ['neutral', 'calm', 'happy', 'sad', 'angry', 'fearful', 'disgust', 'surprised']
feat_dict['Emotion intensity'] = ['normal', 'strong']
feat_dict['Statement'] = ["Kids are talking by the door", "Dogs are sitting by the door"]
feat_dict['Repetition'] = ["1st repetition", "2nd repetition"]


def filename2feats(filename):
    """Decode a RAVDESS filename (a Path) into a dict of human-readable features."""
    codes = filename.stem.split('-')
    d = {}
    for i, k in enumerate(feat_dict.keys()):
        d[k] = feat_dict[k][int(codes[i]) - 1]
    d['Actor'] = codes[-1]
    d['Gender'] = 'female' if int(codes[-1]) % 2 == 0 else 'male'
    return d

def preprocess(data_root_path):
    output_dir = data_root_path / "RAVDESS_ser"  # processed-output location (not yet used below)
    for f in data_root_path.iterdir():
        print(f)
        print(filename2feats(f))
        print("\n\n")


# Filename identifiers
#
# Modality (01 = full-AV, 02 = video-only, 03 = audio-only).
# Vocal channel (01 = speech, 02 = song).
# Emotion (01 = neutral, 02 = calm, 03 = happy, 04 = sad, 05 = angry, 06 = fearful, 07 = disgust, 08 = surprised).
# Emotional intensity (01 = normal, 02 = strong). NOTE: There is no strong intensity for the 'neutral' emotion.
# Statement (01 = "Kids are talking by the door", 02 = "Dogs are sitting by the door").
# Repetition (01 = 1st repetition, 02 = 2nd repetition).
# Actor (01 to 24. Odd-numbered actors are male, even-numbered actors are female).
#
# Filename example: 02-01-06-01-02-01-12.mp4
#
#   Video-only (02)
#   Speech (01)
#   Fearful (06)
#   Normal intensity (01)
#   Statement "dogs" (02)
#   1st Repetition (01)
#   12th Actor (12)
#   Female, as the actor ID number is even.

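# A quick sanity check of the decoder against the documented example (a sketch:
# the .mp4 name comes straight from the RAVDESS filename notes above, and the
# expected output follows the feat_dict ordering defined in this file):
#
#   >>> filename2feats(Path("02-01-06-01-02-01-12.mp4"))
#   {'Modality': 'video-only', 'Vocal channel': 'speech', 'Emotion': 'fearful',
#    'Emotion intensity': 'normal', 'Statement': 'Dogs are sitting by the door',
#    'Repetition': '1st repetition', 'Actor': '12', 'Gender': 'female'}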

# ---------------------------------------------------------------------------
# NOTE: everything below is commented-out reference code carried over from an
# IEMOCAP preprocessor (per-session EmoEvaluation labels, transcriptions, and
# wav slicing). It refers to methods of a class that does not exist in this
# file and is kept only as a starting point for the RAVDESS pipeline above.
# ---------------------------------------------------------------------------

# self.data_root_path = Path(data_root_path)
# df = pd.DataFrame()
# for session in range(1, 5):
#     print(f"Processing session {session}")
#     df = pd.concat([df, self.read_session_data(session)])
#
# # Write the sliced wavs
# print("Writing wav slices to file...")
# sample_rate = 16000
# for index, row in df.iterrows():
#     old_filename = str(self.data_root_path / Path(row['Path_to_Wav']))
#     new_filename = str(output_dir / (index + ".wav"))
#     waveform = self.read_audio(old_filename,
#                                start=row['Time_Start'],
#                                end=row['Time_End'])
#     torchaudio.save(new_filename, src=waveform, sample_rate=sample_rate)
#     df.at[index, 'Path_to_Wav'] = new_filename
#
# # Write out the combined data information
# try:
#     df.to_csv(output_filename, index=False, header=True)
# except OSError:
#     print("Error writing dataframe to csv.")
#
#
# def read_session_data(self, session_id):
#     d1 = self.read_emotion_labels(session_id)
#     d2 = self.read_transcriptions(session_id)
#     return d1.join(d2)
#
#
# def read_emotion_labels(self, session_id):
#     emo_path = self.data_root_path / f"Session{session_id}" / "dialog" / "EmoEvaluation"
#     emo_files = [f for f in emo_path.iterdir() if f.suffix == ".txt"]
#     df = pd.DataFrame()
#     for ef in emo_files:
#         df2 = self.read_emotion_file(ef)
#         for ri, row in df2.iterrows():
#             df2.loc[ri, 'Path_to_Wav'] = os.path.join(f"Session{session_id}",
#                                                       "dialog", "wav",
#                                                       row['Session_ID'] + ".wav")
#         df = pd.concat([df, df2])
#     df = df.set_index('ID')
#     return df
#
#
# def slice_audio(self, session_id):
#     for i, row in df.iterrows():
#         filename = row['Session_ID'] + ".wav"
#         wav_path = self.data_root_path / f"Session{session_id}" / "dialog" / "wav" / filename
#         print("wav path = ", wav_path)
#         self.read_audio(wav_path, row['Time_Start'], row['Time_End'], row['Annotations'])
#
#
# def read_emotion_file(self, filename):
#     time_extract_pattern = r"\[([0-9\.]+) - ([0-9\.]+)\] +([^ ]+) +([^ ]+) \[([^\]]+)\]"
#     df = pd.DataFrame()
#     with open(filename) as file:
#         lines = file.readlines()
#     lines = lines[2:]  # Remove header
#     i = 0
#     while i < len(lines):
#         if match := re.search(time_extract_pattern, lines[i].replace("\t", " ")):
#             time_start = float(match.group(1))
#             time_end = float(match.group(2))
#             filename = match.group(3)
#             mys_id = match.group(4)
#             digits = [float(x) for x in match.group(5).split(", ")]
#             annotations = []
#             while lines[i] != "\n":
#                 i += 1
#                 if lines[i].startswith("C-"):
#                     aid, anns, _ = lines[i].split("\t")
#                     for an in anns.split(";")[:-1]:
#                         annotations.append(an.strip())
#                 elif lines[i].startswith("A-"):
#                     pass
#
#             annotations = list(set(annotations))
#             annotations = ','.join(annotations)
#
#             session_id = filename[:filename.rindex("_")]
#             utt_id = filename[filename.rindex("_") + 1:]
#
#             df2 = pd.DataFrame([{
#                 'ID': filename,  # ID for join between dataframes is the filename
#                 'Session_ID': session_id,
#                 'Utterance_ID': utt_id,
#                 'Time_Start': time_start,
#                 'Time_End': time_end,
#                 'Labels': annotations}])
#             df = pd.concat([df, df2], ignore_index=True)
#         else:
#             i += 1
#     return df
#
#
# def read_transcriptions(self, session_id):
#     df = pd.DataFrame()
#     transcripts_path = self.data_root_path / f"Session{session_id}" / "dialog" / "transcriptions"
#     transcript_files = [f for f in transcripts_path.iterdir() if f.suffix == ".txt"]
#     for f in transcript_files:
#         df = pd.concat([df, self.read_transcript(f)], ignore_index=True)
#     df = df.set_index('ID')
#     return df
#
#
# def read_transcript(self, filename):
#     df = pd.DataFrame()
#     with open(filename, "r") as f:
#         for l in f.readlines():
#             cols = l.strip().split(" ")
#             if l[1] != ":" and len(cols) > 2:  # Lines like "F:Mmhmm." get ignored here
#                 df2 = pd.DataFrame([{
#                     'ID': cols[0],
#                     'Transcription': ' '.join(cols[2:])
#                 }])
#                 df = pd.concat([df, df2])
#     return df
#
#
# def read_audio(self, filename, start, end, sample_rate=16000):
#     waveform, sample_rate = torchaudio.load(filename,
#                                             frame_offset=int(start * sample_rate),
#                                             num_frames=int((end - start) * sample_rate))
#     return waveform
#
#
# if __name__ == '__main__':
#     # osx_path = '/Users/narad/Downloads/RAVDESS_full_release'
#     # windows_path = r'C:\Users\jasonn\Desktop\ser\data\RAVDESS_full_release'
#
#     parser = argparse.ArgumentParser(description='Preprocess an emotion dataset release.')
#     parser.add_argument('--data_dir', type=Path, required=True,
#                         help='Path to IEMOCAP release directory.')
#     parser.add_argument('--output_file', type=Path, default="data.csv",
#                         help='Filename for Huggingface-compatible dataset csv file.')
#     parser.add_argument('--output_dir', type=Path, default="processed",
#                         help='Directory for processed wav files.')
#     args = parser.parse_args()
#
#     print(args)
#
#     reader = RAVDESS(data_root_path=args.data_dir,
#                      output_filename=args.output_file)
#
# columns = ['Utterance_ID',
#            'Time_Start',
#            'Time_End',
#            'Annotations']
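
# A minimal, active entry point for the RAVDESS functions defined at the top of
# this file. This is a sketch rather than part of the original upload: the only
# main block the script ships is the commented-out IEMOCAP one above, so the
# flag name and the bare call to preprocess() here are assumptions.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Preprocess the RAVDESS dataset.')
    parser.add_argument('--data_dir', type=Path, required=True,
                        help='Path to the RAVDESS release directory.')
    args = parser.parse_args()
    preprocess(args.data_dir)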