import os
import numpy as np
import librosa
import python_speech_features
from pathlib import Path

import multiprocessing


def splited_by_windows(data, wind_stride, wind_length, padding=True):
    """
    Slice a 2-D feature sequence into overlapping fixed-length windows.

    Parameters
    ----------
    data : ndarray, shape (seq_len1, dim1)
        Frame-level features (e.g. MFCC frames, one row per 10 ms hop).
    wind_stride : int
        Hop, in frames, between the starts of consecutive windows.
    wind_length : int
        Number of frames per window.
    padding : bool, default True
        When True, reflect-pad wind_length//2 frames on each side so the
        windows are centered on the original frames.
        (Previously this flag was accepted but ignored; padding was always
        applied. The default behavior is unchanged.)

    Returns
    -------
    ndarray, shape (seq_len2, wind_length, dim1 - 1)
        where seq_len2 = (seq_len1 + 2*(wind_length//2) - wind_length) // wind_stride + 1
        when padding is True (for even wind_length this equals the old
        seq_len1 // wind_stride + 1), and
        (seq_len1 - wind_length) // wind_stride + 1 otherwise.
        Feature column 0 is dropped (presumably the MFCC c0/energy
        coefficient — TODO confirm with the downstream consumer).
    """
    seq_len1, dim1 = data.shape

    if padding:
        # Reflect-pad so each output window is centered on an input frame.
        half = wind_length // 2
        data = np.pad(data, pad_width=((half, half), (0, 0)), mode="reflect")

    # Count only windows that fit entirely inside the (padded) sequence.
    # Deriving this from the actual padded length (rather than the old
    # seq_len1 // wind_stride + 1) avoids a short final slice — and a
    # broadcast error — when wind_length is odd and wind_stride divides
    # seq_len1.  For the even-wind_length default the count is identical.
    seq_len2 = (data.shape[0] - wind_length) // wind_stride + 1

    data_output = np.zeros((seq_len2, wind_length, dim1 - 1), dtype=float)

    for i in range(seq_len2):
        ibeg = i * wind_stride
        iend = ibeg + wind_length
        data_output[i, :, :] = data[ibeg:iend, 1:]  # drop column 0 (c0/energy)

    return data_output


# def func_processing(path_audio):
    

if __name__ == "__main__":

    path_data = Path("/mnt/data/DATA/LRW/processing")

    pool = multiprocessing.Pool(processes=4)


    # for each word
    v_ids = sorted(path_data.glob("*"))
    print(f"Have {len(v_ids)} dir files, \n {v_ids[:3]}")

    for i, ipath_id in enumerate(v_ids):
        id_name = ipath_id.stem

        dir_video = path_data.joinpath(id_name)
        v_dir_videos = sorted(dir_video.glob("*/*/video/video_orignal.mp4"))

        print(f"\nid= {id_name}, Have {len(v_dir_videos)} video files, \n {v_dir_videos[:3]}")

        # for each video
        for j, jpath_videos in enumerate(v_dir_videos):
            
            if (j+1) % 100 == 0: print(f"processing {j+1}/{len(v_dir_videos)}")

            jpath_data = jpath_videos.parent.parent
            jpath_frame = jpath_data.joinpath("frames")
            nframes = len(sorted(jpath_frame.glob("*.jpg")))
            #print(f"jpath_data = {jpath_data}, nframe = {nframes}")


            ipath_audio = jpath_data.joinpath("audio/audio_orignal.wav")
            ipath_feat_save = jpath_data.joinpath("audio/mfcc_feat.npy")
            ipath_feat_orignal_save = jpath_data.joinpath("audio/mfcc_feat_orignal.npy")

            # for some data not have .wav files
            if not ipath_audio.exists():
                print(f"NOT EXISTING: {ipath_audio}")
                continue

            if ipath_feat_orignal_save.exists():
                print(f"EXSTING, {ipath_feat_orignal_save}")
                continue


            # extarct mfcc feature 
            audio, sr = librosa.load(str(ipath_audio), sr=16000)                              # (69760, )
            mfcc_orig = python_speech_features.mfcc(audio, 16000, winstep=0.01)                    
            
            #duration = len(audio) / sr
            #print(f"audio shape={audio.shape}, mfcc shape={mfcc_orig.shape}, duration={duration}") 

            # splited by windows (to fit video frame with fps 25)
            wind_stride = 4     #  4 x 10ms =  40 ms, video(25fps) frame = 1/25s = 40ms
            wind_length = 28    # 28 x 10ms = 280 ms, = 7 frames * 40 ms
            # seq_len2 = (seq_len1 + 2*pad - wind_length) // wind_stride + 1
            mfcc = splited_by_windows(mfcc_orig, wind_stride, wind_length, padding=True)      # seq_len2 x 28 x 12, 
            #print(f"MFCC input size (splited by windows) ={mfcc.shape}") 

            # save files
            np.save(str(ipath_feat_orignal_save), mfcc_orig)
            # np.save(str(ipath_feat_save), mfcc)
            # print(f"SAVE: mfcc size={mfcc.shape} --> {str(ipath_feat_save)}")    # mfcc size=(31, 28, 12)
            # print(f"SAVE: mfcc size={mfcc_orig.shape} --> {str(ipath_feat_orignal_save)}")    # mfcc size=(31, 28, 12)

            #break
        #break