from pathlib import Path
from collections import OrderedDict
import argparse
import os

import pandas as pd
import regex as re
import torchaudio
from tqdm import tqdm

# Mapping from the numeric codes in RAVDESS filenames to feature values.
od = OrderedDict()
od['Modality'] = ['full-AV', 'video-only', 'audio-only']
od['Vocal channel'] = ['speech', 'song']
od['Emotion'] = ['neutral', 'calm', 'happy', 'sad', 'angry', 'fearful', 'disgust', 'surprised']
od['Emotion intensity'] = ['normal', 'strong']
od['Statement'] = ["Kids are talking by the door", "Dogs are sitting by the door"]
od['Repetition'] = ["1st repetition", "2nd repetition"]


def filename2feats(filename):
    """Decode a RAVDESS filename (a pathlib.Path) into a dict of features."""
    codes = filename.stem.split('-')
    d = {}
    for i, k in enumerate(od.keys()):
        d[k] = od[k][int(codes[i]) - 1]
    d['Actor'] = codes[-1]
    d['Gender'] = 'female' if int(codes[-1]) % 2 == 0 else 'male'
    return d


def preprocess(data_root_path):
    output_dir = data_root_path / "RAVDESS_ser"
    for f in data_root_path.iterdir():
        print(f)
        print(filename2feats(f))
        print("\n\n")


# Filename identifiers
#   Modality (01 = full-AV, 02 = video-only, 03 = audio-only).
#   Vocal channel (01 = speech, 02 = song).
#   Emotion (01 = neutral, 02 = calm, 03 = happy, 04 = sad, 05 = angry,
#            06 = fearful, 07 = disgust, 08 = surprised).
#   Emotional intensity (01 = normal, 02 = strong).
#       NOTE: There is no strong intensity for the 'neutral' emotion.
#   Statement (01 = "Kids are talking by the door", 02 = "Dogs are sitting by the door").
#   Repetition (01 = 1st repetition, 02 = 2nd repetition).
#   Actor (01 to 24. Odd-numbered actors are male, even-numbered actors are female).
#
# Filename example: 02-01-06-01-02-01-12.mp4
#   Video-only (02)
#   Speech (01)
#   Fearful (06)
#   Normal intensity (01)
#   Statement "dogs" (02)
#   1st Repetition (01)
#   12th Actor (12)
#   Female, as the actor ID number is even.
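# Decoding the example filename above with filename2feats() gives the following
# (worked out from the `od` table; key order follows `od` plus the two derived fields):
#
#   >>> filename2feats(Path("02-01-06-01-02-01-12.mp4"))
#   {'Modality': 'video-only', 'Vocal channel': 'speech', 'Emotion': 'fearful',
#    'Emotion intensity': 'normal', 'Statement': 'Dogs are sitting by the door',
#    'Repetition': '1st repetition', 'Actor': '12', 'Gender': 'female'}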
# Earlier IEMOCAP-style reader (expects the Session{N}/dialog/... layout of an
# IEMOCAP release); kept here for reference.
#
# class RAVDESS:
#
#     def __init__(self, data_root_path, output_filename, output_dir):
#         self.data_root_path = Path(data_root_path)
#         df = pd.DataFrame()
#         for session in range(1, 5):
#             print(f"Processing session {session}")
#             df = pd.concat([df, self.read_session_data(session)])
#
#         # Write the sliced wavs
#         print("Writing wav slices to file...")
#         sample_rate = 16000
#         for index, row in df.iterrows():
#             old_filename = str(self.data_root_path / Path(row['Path_to_Wav']))
#             new_filename = str(output_dir / (index + ".wav"))
#             waveform = self.read_audio(old_filename,
#                                        start=row['Time_Start'],
#                                        end=row['Time_End'])
#             torchaudio.save(new_filename,
#                             src=waveform,
#                             sample_rate=sample_rate)
#             df.at[index, 'Path_to_Wav'] = new_filename
#
#         # Write out the combined data information
#         try:
#             df.to_csv(output_filename, index=False, header=True)
#         except OSError:
#             print("Error writing dataframe to csv.")
#
#     def read_session_data(self, session_id):
#         d1 = self.read_emotion_labels(session_id)
#         d2 = self.read_transcriptions(session_id)
#         return d1.join(d2)
#
#     def read_emotion_labels(self, session_id):
#         emo_path = self.data_root_path / f"Session{session_id}" / "dialog" / "EmoEvaluation"
#         emo_files = [f for f in emo_path.iterdir() if f.suffix == ".txt"]
#         df = pd.DataFrame()
#         for ef in emo_files:
#             df2 = self.read_emotion_file(ef)
#             for ri, row in df2.iterrows():
#                 df2.loc[ri, 'Path_to_Wav'] = os.path.join(f"Session{session_id}",
#                                                           "dialog", "wav",
#                                                           row['Session_ID'] + ".wav")
#             df = pd.concat([df, df2])
#         df = df.set_index('ID')
#         return df
#
#     def slice_audio(self, session_id, df):
#         for i, row in df.iterrows():
#             filename = row['Session_ID'] + ".wav"
#             wav_path = self.data_root_path / f"Session{session_id}" / "dialog" / "wav" / filename
#             print("wav path = ", wav_path)
#             self.read_audio(wav_path, row['Time_Start'], row['Time_End'], row['Annotations'])
#
#     def read_emotion_file(self, filename):
#         time_extract_pattern = r"\[([0-9\.]+) - ([0-9\.]+)\] +([^ ]+) +([^ ]+) \[([^\]]+)\]"
#         df = pd.DataFrame()
#         i = 0
#         with open(filename) as file:
#             lines = file.readlines()
#             lines = lines[2:]  # Remove the file header
#             while i < len(lines):
#                 if match := re.search(time_extract_pattern, lines[i].replace("\t", " ")):
#                     time_start = float(match.group(1))
#                     time_end = float(match.group(2))
#                     filename = match.group(3)
#                     mys_id = match.group(4)
#                     digits = [float(x) for x in match.group(5).split(", ")]
#                     annotations = []
#                     while lines[i] != "\n":
#                         i += 1
#                         if lines[i].startswith("C-"):
#                             aid, anns, _ = lines[i].split("\t")
#                             for an in anns.split(";")[:-1]:
#                                 annotations.append(an.strip())
#                         elif lines[i].startswith("A-"):
#                             pass
#                     annotations = list(set(annotations))
#                     annotations = ','.join(annotations)
#                     session_id = filename[:filename.rindex("_")]
#                     utt_id = filename[filename.rindex("_") + 1:]
#                     df2 = pd.DataFrame([{
#                         'ID': filename,  # ID for the join between dataframes is the filename
#                         'Session_ID': session_id,
#                         'Utterance_ID': utt_id,
#                         'Time_Start': time_start,
#                         'Time_End': time_end,
#                         'Labels': annotations}])
#                     df = pd.concat([df, df2], ignore_index=True)
#                 else:
#                     i += 1
#         return df
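#     # Shape of the EmoEvaluation entries that read_emotion_file() above is
#     # written against (fields are tab-separated in the raw files; tabs are
#     # replaced with spaces before the regex is applied). Values illustrative:
#     #
#     #   [6.2901 - 8.2357]   Ses01F_impro01_F000   neu   [2.5000, 2.5000, 2.5000]
#     #   C-E1:   Neutral;    ()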
".txt"] # # for f in transcript_files: # # df = pd.concat([df, self.read_transcript(f)], ignore_index=True) # # df = df.set_index('ID') # # return df # # def read_transcript(self, filename): # # df = pd.DataFrame() # # with open(filename, "r") as f: # # for l in f.readlines(): # # cols = l.strip().split(" ") # # if l[1] != ":" and len(cols) > 2: # There are some lines like "F:Mmhmm." that get ignored here # # df2 = pd.DataFrame([{ # # 'ID': cols[0], # # 'Transcription': ' '.join(cols[2:]) # # }]) # # df = pd.concat([df, df2]) # # return df # # def read_audio(self, filename, start, end, sample_rate=16000): # # waveform, sample_rate = torchaudio.load(filename, # # frame_offset=int(start * sample_rate), # # num_frames=int((end-start) * sample_rate)) # # return waveform # # if __name__ == '__main__': # # # osx_path = '/Users/narad/Downloads/RAVDESS_full_release' # # # windows_path = r'C:\Users\jasonn\Desktop\ser\data\RAVDESS_full_release' # # parser = argparse.ArgumentParser(description='Process some integers.') # # parser.add_argument('--data_dir', type=Path, required=True, # # help='Path to IEOMCAP release directory.') # # parser.add_argument('--output_file', type=Path, default="data.csv", # # help='Filename for Huggingface-compatible dataset csv file.') # # parser.add_argument('--output_dir', type=Path, default="processed", # # help='Directory for processed wav files') # # args = parser.parse_args() # # print(args) # # reader = RAVDESS(data_root_path=args.data_dir, # # output_filename=args.output_file) # # columns = ['Utterance_ID', # # 'Time_Start', # # 'Time-End', # # 'Annotations']