import python_speech_features as psf
import numpy as np
from global_config import *
# import scipy.signal as signal
import scipy.io.wavfile as wav
import os
from tqdm import tqdm
import json
import random
import librosa
import wave
from bs37ToARKit import ARKitBlendShapeLocation
# data path
projroot = os.path.join(os.path.dirname(__file__),'.')
dataroot = os.path.join(projroot,'data/beat_langs')



# Audio decoder callable plus the number of leading/trailing video frames to
# trim around each clip — all produced by create_audio_decoder(), which comes
# from the `global_config` star import above.
audio_decoder,start_videoframe,end_videoframe = create_audio_decoder()

# feature_path is also defined in global_config; per-clip feature .npy files
# are written there by comput_features().
os.makedirs(feature_path,exist_ok=True)


def comput_features(wav_files, max_count):
    """Decode audio features for up to ``max_count`` wav files.

    For each wav file, runs the module-level ``audio_decoder`` (from
    global_config) and writes the resulting feature array into
    ``feature_path`` as ``<basename>.npy``.

    Args:
        wav_files: iterable of wav file paths.
        max_count: stop after this many files decoded successfully.

    Returns:
        List of wav file paths that decoded successfully.
    """
    files = []
    # BUG FIX: the original reused the enumerate index as the success counter
    # (`i += 1` was clobbered each iteration), so failed files counted toward
    # max_count and the progress print was wrong. Use a dedicated counter.
    success_count = 0
    for wav_file in wav_files:
        feature_file = os.path.basename(wav_file).split('.')[0] + '.npy'

        output = audio_decoder(wav_file, os.path.join(feature_path, feature_file), max_duration=max_duration)
        if output is None:
            print(f'file format error:{wav_file}')
            continue

        files.append(wav_file)
        success_count += 1
        print(f'file format ok:{wav_file} current progress:{success_count} / {max_count}')
        if success_count >= max_count:
            break

    return files


def combine_files(files, combine_filename, target_path):
    """Concatenate per-clip feature/annotation pairs into one dataset.

    Writes two files into ``target_path``:
      * ``<combine_filename>.feature.npy`` -- stacked audio features
      * ``<combine_filename>.bs.npy``      -- stacked blendshape weights

    Args:
        files: list of dicts with keys 'feature' (path to a feature .npy)
            and 'anno' (annotation path: a .json of per-frame weights or a
            .npy blendshape array).
        combine_filename: base name for the outputs ('train' / 'val').
        target_path: output directory (must already exist).
    """
    feature_combine_file = combine_filename + '.feature.npy'
    blendshape_combine_file = combine_filename + '.bs.npy'

    # 0-based index range of the mouth-related ARKit channels we keep;
    # everything outside this range is zeroed.
    lo = ARKitBlendShapeLocation.JawForward.value - 1
    hi = ARKitBlendShapeLocation.MouthUpperUpRight.value - 1

    feature = None
    blendshape = None
    fileCount = len(files)
    for i in range(fileCount):
        feature_file = files[i]['feature']
        anno = files[i]['anno']

        if not os.path.exists(feature_file):
            continue
        if not os.path.exists(anno):
            continue

        feature_temp = np.load(feature_file)
        if len(feature_temp.shape) == 4:
            feature_temp = np.squeeze(feature_temp, 3)

        blendshape_temp = []
        if str.endswith(anno, '.json'):
            with open(anno, 'r', encoding='utf8') as fp:
                parsed_json = json.loads(fp.read())

                lastTime = parsed_json['frames'][-1]['time']
                frameCount = len(parsed_json['frames'])
                print(f'parse [{anno}] time [{lastTime}] frameCount [{frameCount}] progress [{i} / {fileCount}]')

                # Keep every other frame (2x temporal downsample).
                for j in range(frameCount):
                    if j % 2 != 0:
                        continue
                    item = parsed_json['frames'][j]
                    weights = []
                    # BUG FIX: this loop variable used to be ``i``, which
                    # clobbered the outer file index used by the prints below.
                    for k, num in enumerate(item['weights']):
                        if lo <= k <= hi:
                            weights.append(num)
                        else:
                            weights.append(0.0)
                    blendshape_temp.append(weights)
            blendshape_temp = np.array(blendshape_temp, dtype=np.float32)

        elif str.endswith(anno, '.npy'):
            blendshape_temp = np.load(anno)
            # BUG FIX: same ``i`` shadowing as above; renamed to ``col``.
            for col in range(blendshape_temp.shape[1]):
                if not (lo <= col <= hi):
                    blendshape_temp[:, col] = 0
            # Drop the trailing column (not part of the kept channel set).
            blendshape_temp = blendshape_temp[:, :-1]

        # Align lengths; cut() returns (None, None) for unusable clips.
        feature_temp, blendshape_temp = cut(feature_temp, blendshape_temp)
        if feature_temp is not None and blendshape_temp is not None:
            if feature_temp.shape[0] != blendshape_temp.shape[0]:
                print(f'{i} shape not same: feature {feature_temp.shape}  bs {blendshape_temp.shape}')
                continue

            if feature is None:
                feature = feature_temp
                blendshape = blendshape_temp
            else:
                feature = np.concatenate((feature, feature_temp), 0)
                blendshape = np.concatenate((blendshape, blendshape_temp), 0)

            print(i, anno, feature.shape, blendshape.shape)
        else:
            print(f'{anno} Content is Not Valid')

    np.save(os.path.join(target_path, feature_combine_file), feature)
    np.save(os.path.join(target_path, blendshape_combine_file), blendshape)

def combine(wavefiles, feature_path, target_path):
    """Pair wav files with their computed features and annotations, then
    build shuffled train/val combined datasets in ``target_path``.

    A wav file is used only when a matching feature .npy exists in
    ``feature_path`` and a sibling .json (preferred) or .npy annotation
    exists next to the wav.

    Args:
        wavefiles: wav file paths to consider.
        feature_path: directory with per-clip feature .npy files.
        target_path: output directory for the combined datasets.
    """
    feature_files = sorted(os.listdir(feature_path))
    print('feature: ', feature_files)

    feature_list = [f.replace('.npy', '') for f in feature_files]

    files = []
    for wavfile in wavefiles:
        fn = os.path.basename(wavfile).replace('.wav', '')
        # FIX(idiom): these locals were named ``json``/``npy``, shadowing the
        # imported json module inside this function.
        json_anno = wavfile.replace('.wav', '.json')
        npy_anno = wavfile.replace('.wav', '.npy')
        if fn not in feature_list:
            continue
        if os.path.exists(json_anno):
            files.append({
                'feature': os.path.join(feature_path, fn + '.npy'),
                'anno': json_anno,
            })
        elif os.path.exists(npy_anno):
            files.append({
                'feature': os.path.join(feature_path, fn + '.npy'),
                'anno': npy_anno,
            })

    # Shuffled 90/10 train/val split, with at least one training file.
    random.shuffle(files)
    fileCount = len(files)
    trainCount = max(int(np.floor(fileCount * 0.9)), 1)
    train_files = files[0:trainCount]
    val_files = files[trainCount:]

    combine_files(train_files, "train", target_path)
    # When the split leaves no validation files, reuse the training files so
    # a "val" dataset always exists.
    if len(val_files) == 0:
        combine_files(train_files, "val", target_path)
    else:
        combine_files(val_files, "val", target_path)

def cut(wav_feature, blendshape_target):
    """Trim an audio-feature / blendshape pair to a common frame count.

    Discards the module-level ``start_videoframe`` / ``end_videoframe``
    frames from the blendshape track, then cuts both sequences to the
    shorter remaining length.

    Returns:
        (wav_feature, blendshape_target) trimmed to equal length, or
        (None, None) when the clip is too short to be usable.
    """
    n_audioframe = len(wav_feature)
    n_videoframe = len(blendshape_target)
    print('--------\n', 'Current dataset -- n_audioframe: {}, n_videoframe:{}'.format(n_audioframe, n_videoframe))

    # Video frames left after removing the configured lead-in / tail.
    usable_videoframes = n_videoframe - start_videoframe - end_videoframe
    nframe = min(n_audioframe, usable_videoframes)

    # Optional extra trim margins (currently disabled) and the minimum
    # number of content frames a clip must keep.
    start_offset = 0
    end_offset = 0
    min_content = 1
    if nframe <= start_offset + end_offset + min_content:
        return None, None

    trimmed_bs = blendshape_target[start_videoframe + start_offset : start_videoframe + nframe - end_offset]
    trimmed_feature = wav_feature[start_offset : nframe - end_offset]
    return trimmed_feature, trimmed_bs

def list_files_in_dir(dir, ext, filelist):
    """Recursively collect files under ``dir`` whose names end with ``ext``,
    appending full paths to ``filelist`` in place.

    When the global ``lang`` (from global_config) is 'zh', only files whose
    third underscore-separated field is '2' or '3' are kept — presumably the
    dataset's naming convention for Chinese recordings (TODO: confirm
    against the BEAT dataset documentation).

    Args:
        dir: directory to walk.
        ext: filename suffix to match (e.g. 'wav').
        filelist: output list, mutated in place.
    """
    for file in os.listdir(dir):
        if lang == 'zh':
            # BUG FIX: accessing strs[2] was guarded only by len(strs) > 1,
            # raising IndexError on names with exactly two fields.
            strs = file.split('_')
            isZh = len(strs) > 2 and strs[2] in ('2', '3')
            if not isZh:
                continue
        full_path = os.path.join(dir, file)
        if os.path.isdir(full_path):
            list_files_in_dir(full_path, ext, filelist)
        elif os.path.isfile(full_path):
            if str.endswith(file, ext):
                filelist.append(full_path)
            
def bs_preprocess(input):
    """Post-process a (frames, n_channels) blendshape weight array for training.

    Zeroes non-mouth channels (for wide arrays), amplifies all weights,
    clamps to [0, 1], then applies hand-tuned per-channel damping/offsets.
    Accepts either a torch.Tensor or a np.ndarray and returns the processed
    array; a no-op when global_config.preprocess_train is falsy.
    NOTE(review): ``torch`` and ``preprocess_train`` are expected to come
    from the global_config star import — confirm.
    """
    if preprocess_train:
        # offset converts 1-based ARKitBlendShapeLocation values to 0-based
        # column indices. A 27-column input is assumed to already start at
        # JawForward, so the full JawForward value is subtracted instead.
        offset = 1
        if input.shape[1] == 27:
            offset = ARKitBlendShapeLocation.JawForward.value
        elif input.shape[1] > 27:        
            # Full-width array: zero everything outside the mouth range.
            input[:,:ARKitBlendShapeLocation.JawForward.value-offset] = 0
            input[:,ARKitBlendShapeLocation.MouthUpperUpRight.value-offset + 1:] = 0
            
        # Amplify all weights, then clamp the excess back to 1.0 below.
        input[:,:] *= 1.5
        
        if isinstance(input, torch.Tensor):
            input = torch.where(input > 1.0, 1.0, input)     
            
        elif isinstance(input, np.ndarray):
            input = np.where(input > 1.0, 1.0, input)
    
    
        # Hand-tuned per-channel adjustments (empirically chosen constants):
        # suppress smiles entirely, damp lip raises/lowers, and bias a few
        # jaw/mouth channels downward.
        input[:,ARKitBlendShapeLocation.MouthSmileLeft.value-offset] *= 0
        input[:,ARKitBlendShapeLocation.MouthSmileRight.value-offset] *= 0    
        input[:,ARKitBlendShapeLocation.MouthLowerDownLeft.value-offset] *= 0.2
        input[:,ARKitBlendShapeLocation.MouthLowerDownRight.value-offset] *= 0.2
        input[:,ARKitBlendShapeLocation.MouthUpperUpLeft.value-offset] *= 0.2
        input[:,ARKitBlendShapeLocation.MouthUpperUpRight.value-offset] *= 0.2
        
        input[:,ARKitBlendShapeLocation.JawOpen.value-offset] -= 0.3 
        input[:,ARKitBlendShapeLocation.MouthFunnel.value-offset] -= 0.3
        
        input[:,ARKitBlendShapeLocation.MouthShrugLower.value-offset] -= 0.3
        input[:,ARKitBlendShapeLocation.MouthShrugUpper.value-offset] -= 0.3
        # input[:,ARKitBlendShapeLocation.MouthShrugUpper.value-offset] *= 0.2
        # input[:,ARKitBlendShapeLocation.MouthClose.value-offset] *= 3.0
        
        # Clamp negatives (introduced by the offsets above) back to zero.
        if isinstance(input, torch.Tensor):
            input = torch.where(input < 0,0,input)     
            
        elif isinstance(input, np.ndarray):
            input = np.where(input < 0,0,input)
        return input
    else:
        return input

def main():
    """Build the combined training data: find wavs, optionally compute
    audio features, then merge features + annotations into train/val sets.
    """
    wav_list = []
    list_files_in_dir(src_dataset_path, 'wav', wav_list)
    if need_shuffle:
        random.shuffle(wav_list)

    # Either decode features now, or assume they already exist on disk.
    if copmute_feature:
        wave_files_gen = comput_features(wav_list, max_wav_count)
    else:
        wave_files_gen = wav_list

    os.makedirs(combine_path, exist_ok=True)
    combine(wave_files_gen, feature_path, combine_path)


def test_wav_json_pair():
    """Manual smoke test: decode one wav, parse its annotation, run cut(),
    and print the resulting shapes. Paths are hard-coded local test assets;
    the earlier assignments are leftovers from trying different clips.
    """
    wavFile = 'data/test/15_carlos_0_11_11.wav'
    anno = 'data/test/15_carlos_0_11_11.json'
    wavFile = 'data/test/1_wayne_0_1_1.wav'
    anno = 'data/test/1_wayne_0_1_1.json'

    wavFile = 'data/test/woman.wav'

    feature = audio_decoder(wavFile, max_duration=max_duration)

    # BUG FIX: this squeeze referenced the undefined name ``feature_temp``
    # (NameError whenever the decoder returned a 4-D array); it must operate
    # on ``feature``.
    if len(feature.shape) == 4:
        feature = np.squeeze(feature, 3)

    blendshape = None
    if os.path.exists(anno):
        blendshape_temp = []
        if str.endswith(anno, '.json'):
            with open(anno, 'r', encoding='utf8') as fp:
                parsed_json = json.loads(fp.read())

                # Keep every other frame (2x temporal downsample), zeroing
                # all weights outside the mouth channel range.
                for j in range(len(parsed_json['frames'])):
                    if j % 2 != 0:
                        continue
                    item = parsed_json['frames'][j]
                    weights = []
                    for k, num in enumerate(item['weights']):
                        if ARKitBlendShapeLocation.JawForward.value - 1 <= k <= ARKitBlendShapeLocation.MouthUpperUpRight.value - 1:
                            weights.append(num)
                        else:
                            weights.append(0.0)
                    blendshape_temp.append(weights)
            blendshape_temp = np.array(blendshape_temp)

        elif str.endswith(anno, '.npy'):
            blendshape_temp = np.load(anno)
            blendshape_temp = blendshape_temp[:, :-1]

        blendshape = np.array(blendshape_temp)
    feature_cut, blendshape_cut = cut(feature, blendshape)
    print(f'bs:{blendshape.shape} cut:{blendshape_cut.shape} feature:{feature.shape} cut:{feature_cut.shape}')

def loadNpy():
    """Debug helper: load a hard-coded blendshape .npy file and print it."""
    path = 'F:/Beat/3D-ETF/3D-ETF/HDTF/blendshape/RD_Radio1_000.npy'
    print(np.load(path))

# Entry point: build the combined train/val datasets. The commented calls
# are manual debug helpers.
if __name__ == '__main__':
    main()
    # test_wav_json_pair()
    # loadNpy()
