
'''
Dataset initialization tools.

Convert each raw video to an audio track, render mel-spectrogram images
from random clips of the audio, write a CSV label file, and split the
images into train/valid sets.
'''

import os
import numpy as np
import random
import matplotlib.pyplot as plt
import csv
import shutil
import librosa
import librosa.display
from tqdm import tqdm
from moviepy.editor import AudioFileClip

class DatasetTools:
    """Build an audio/image dataset from a directory of raw videos.

    Pipeline:
      1. ``start_convert`` — extract the audio track of every video, then
         render mel-spectrogram images from random stereo clips.
      2. ``get_label`` — write one ``(image_path, label)`` row per image
         to ``label.csv``.
      3. ``split_train_valid`` — randomly copy images into ``train``/``valid``.
    """

    def __init__(self, raw_path, dataset_path):
        """Store paths and make sure the dataset root exists.

        raw_path: directory containing one sub-directory of videos per person.
        dataset_path: output root; created if missing.
        """
        self.raw_path = raw_path
        self.dataset_path = dataset_path
        self.label_file = os.path.join(self.dataset_path, 'label.csv')
        # Number of audio samples used for one spectrogram clip.
        self.audio_length = 100000

        # makedirs(exist_ok=True) also tolerates a missing parent directory,
        # unlike the bare os.mkdir it replaces.
        os.makedirs(self.dataset_path, exist_ok=True)

    def convert_video_to_audio(self, video_path, audio_path):
        """Extract the audio track of ``video_path`` into ``audio_path``."""
        curr_audio = AudioFileClip(video_path)
        try:
            curr_audio.write_audiofile(audio_path, logger=None)
        finally:
            # Always release the underlying reader, even if writing fails.
            curr_audio.close()

    def _save_spectrogram(self, clipped_data, sample_rate, audio_path, image_dir, name_idx):
        """Normalize one mono clip, compute its mel spectrogram and save it as a jpg.

        The output file is named ``<audio basename>_<name_idx>.jpg``.
        """
        # Peak-normalize the clip before feature extraction.
        data = clipped_data * 1.0 / clipped_data.max()
        framelength = 0.025  # 25 ms analysis window
        framesize = int(framelength * sample_rate)

        # Keyword y= is required by librosa >= 0.10 (positional audio arg removed).
        mel_spect = librosa.feature.melspectrogram(y=data, sr=sample_rate, n_fft=framesize)
        mel_spect = librosa.power_to_db(mel_spect, ref=np.max)

        image_path = os.path.join(
            image_dir,
            os.path.basename(audio_path)[:-4] + '_' + str(name_idx) + '.jpg')
        plt.axis('off')
        librosa.display.specshow(mel_spect, sr=sample_rate, x_axis='time', y_axis='mel')
        plt.savefig(image_path, transparent=False, bbox_inches='tight', pad_inches=0)
        plt.close()

    def convert_audio_to_image(self, audio_path, image_dir):
        """Render 10 spectrogram images (5 random clips x 2 channels) for one audio file.

        NOTE(review): assumes a stereo file, i.e. librosa returns shape
        (2, n); a mono file would yield a 1-D array and break the channel
        indexing — confirm against the raw data.
        """
        raw_data, raw_framesize = librosa.load(audio_path, sr=None, mono=False)
        raw_length = raw_data.shape[1]

        if raw_length <= self.audio_length:
            # Audio shorter than one clip: drop 100 samples at each end
            # (edge artifacts) and tile until at least audio_length samples.
            # BUGFIX: the original sliced raw_data[100:-100], which slices
            # the 2-row CHANNEL axis and yields an empty array; slice the
            # sample axis instead.
            trimmed = raw_data[:, 100:-100]
            # NOTE(review): files shorter than ~200 samples would leave
            # ``trimmed`` empty and fail here — presumed not to occur.
            # BUGFIX: tile based on the trimmed length so the result is
            # guaranteed to reach audio_length (the original count could
            # fall short, producing a negative clip range below).
            repeats = self.audio_length // trimmed.shape[1] + 1
            raw_data = np.hstack([trimmed] * repeats)

        left_data = raw_data[0]
        right_data = raw_data[1]
        max_start_idx = raw_data.shape[1] - self.audio_length

        name_idx = 0
        for _ in range(5):
            # One random window per iteration, shared by both channels.
            start_idx = random.randint(0, max_start_idx)
            for channel_data in (left_data, right_data):
                clipped = channel_data[start_idx: start_idx + self.audio_length]
                self._save_spectrogram(clipped, raw_framesize, audio_path, image_dir, name_idx)
                name_idx += 1

    def check_path(self):
        """Ensure the audio/image/train/valid sub-directories exist."""
        for sub_dir in ('audio', 'image', 'train', 'valid'):
            os.makedirs(os.path.join(self.dataset_path, sub_dir), exist_ok=True)

    def start_convert(self):
        """Convert every raw video to audio, then every audio file to images.

        Output layout: ``<dataset>/audio/<person>/<person>_<video>.wav`` and
        ``<dataset>/image/<person>/*.jpg``.
        """
        self.check_path()

        id_list = os.listdir(self.raw_path)
        # BUGFIX: the progress message hard-coded "/20"; report the real count.
        total = len(id_list)

        for person_num, person_id in enumerate(id_list, start=1):
            print('-' * 10)
            print('convert video to audio, person {}/{}'.format(person_num, total))
            person_path = os.path.join(self.raw_path, person_id)
            video_list = os.listdir(person_path)
            audio_dir = os.path.join(self.dataset_path, 'audio', person_id)
            os.makedirs(audio_dir, exist_ok=True)
            for video_idx in tqdm(range(len(video_list))):
                video = video_list[video_idx]
                video_path = os.path.join(person_path, video)
                audio_path = os.path.join(audio_dir, person_id + '_' + video[:-4] + '.wav')
                self.convert_video_to_audio(video_path, audio_path)
        print('*' * 10)

        for person_num, person_id in enumerate(id_list, start=1):
            print('-' * 10)
            print('convert video to image, person {}/{}'.format(person_num, total))
            audio_dir = os.path.join(self.dataset_path, 'audio', person_id)
            audio_list = os.listdir(audio_dir)
            image_dir = os.path.join(self.dataset_path, 'image', person_id)
            os.makedirs(image_dir, exist_ok=True)
            for audio_idx in tqdm(range(len(audio_list))):
                audio_path = os.path.join(audio_dir, audio_list[audio_idx])
                self.convert_audio_to_image(audio_path, image_dir)

    def get_label(self):
        """Append one ``(image_path, label)`` row per image to ``label.csv``.

        NOTE(review): append mode means re-running duplicates rows —
        presumably intentional for incremental builds; confirm.
        """
        image_root_path = os.path.join(self.dataset_path, 'image')
        # newline='' is required by the csv module to avoid blank rows on
        # Windows; `with` guarantees the file is closed on any error.
        with open(self.label_file, 'a', newline='') as label_csv:
            f_csv = csv.writer(label_csv)
            person_list = os.listdir(image_root_path)
            for person_idx in tqdm(range(len(person_list))):
                person_id = person_list[person_idx]
                person_dir = os.path.join(image_root_path, person_id)
                for image in os.listdir(person_dir):
                    # person_id[2:] strips a 2-char prefix to produce the
                    # label — assumes folder names like 'idXXXX'; confirm.
                    f_csv.writerow([os.path.join(person_dir, image), person_id[2:]])

    def split_train_valid(self, train_scale):
        """Randomly copy images into train/ and valid/ sub-directories.

        train_scale: fraction in [0, 1] of images that go to train;
        the remainder go to valid.
        """
        self.check_path()

        image_root_path = os.path.join(self.dataset_path, 'image')
        train_path = os.path.join(self.dataset_path, 'train')
        valid_path = os.path.join(self.dataset_path, 'valid')

        image_list = []
        for root, _, files in os.walk(image_root_path, topdown=False):
            for name in files:
                image_list.append(os.path.join(root, name))

        image_num = len(image_list)
        train_num = int(image_num * train_scale)
        # BUGFIX: range(0, image_num-1) excluded the last image from ever
        # being sampled for train; sample over the full index range.  A set
        # makes the per-image membership test O(1) instead of O(n).
        train_indices = set(random.sample(range(image_num), train_num))

        for image_idx, image_path in enumerate(image_list):
            # The person-id folder name, preserved under train/ or valid/.
            dir_name = os.path.basename(os.path.dirname(image_path))
            dest_root = train_path if image_idx in train_indices else valid_path
            dest_dir = os.path.join(dest_root, dir_name)
            os.makedirs(dest_dir, exist_ok=True)
            shutil.copy(image_path, dest_dir)



# if __name__ == '__main__':
#     dataset_tools = DatasetTools('../../../dataset/train', './')
#     dataset_tools.start_convert()
    