import torch
import time
import argparse
import os
import numpy as np
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
from scipy.signal import savgol_filter

import scipy.io.wavfile as wav

from models import FullyLSTM, NvidiaNet
# from models_testae import *
from models_lstm import *
from txt2json import *
from models import * 
import librosa
print(f'librosa: {librosa.__version__}')
from audio_encode import *
from global_config import * 
from etmodel import *
from bs37ToARKit import ARKitBlendShapeLocation
from preprocess import bs_preprocess

# Command-line arguments.
# BUGFIX: the original used `type=bool`, a well-known argparse pitfall —
# bool("False") is True, so *any* supplied value parsed as True.
# BooleanOptionalAction (Python 3.9+) keeps the same defaults and adds
# proper --smooth / --no-smooth switches.
parser = argparse.ArgumentParser(description='Synthesize wave to blendshape')
parser.add_argument('--wav', type=str, help='wav to synthesize')
parser.add_argument('--smooth', action=argparse.BooleanOptionalAction, default=True)
parser.add_argument('--pad', action=argparse.BooleanOptionalAction, default=True)
parser.add_argument('--control', action=argparse.BooleanOptionalAction, default=False)
parser.add_argument('--wav_list', action='append', default=None, type=str, required=False)

# args = parser.parse_args()
# print(args)
# Directory containing this script; used as the project root.
projroot = os.path.join(os.path.dirname(__file__), '.')


# Build the shared audio-feature decoder once at import time.
# create_audio_decoder() comes from audio_encode (wildcard import); the two
# returned frame indices presumably delimit the usable video-frame range of
# the decoded features — TODO confirm against audio_encode.
audio_decoder,start_videoframe,end_videoframe = create_audio_decoder()


def cutLowAndMul(input, loc, cutLow, mul=1.0):
    """Shift column `loc` of `input` down by `cutLow`, clamp negative values
    to zero, then scale by `mul`.

    Works on both torch.Tensor and numpy.ndarray inputs.  Mutates `input`
    in place and returns it for call chaining.
    """
    column = input[:, loc] - cutLow
    if isinstance(column, torch.Tensor):
        column = torch.where(column < 0, 0, column)
    elif isinstance(column, np.ndarray):
        column = np.where(column < 0, 0, column)
    input[:, loc] = column * mul
    return input
    
def bs_postprocess(input):
    """Post-process a (frames, channels) blendshape weight array.

    For wide layouts, zeroes channels outside the mouth region, applies
    dataset-specific gain/cut tweaks, and clamps everything to [0, 1].
    Accepts either a torch.Tensor or a numpy.ndarray.  No-op when the
    module-level `postprocess_synthesis` flag (from global_config) is off.
    """
    if postprocess_synthesis:
        # ARKit enum values are 1-based; `offset` maps an enum value to a
        # column index in `input`.
        offset = 1
        if input.shape[1] == 27:
            # Mouth-only layout: column 0 already corresponds to JawForward.
            offset = ARKitBlendShapeLocation.JawForward.value
        elif input.shape[1] > 27:
            # Full layout: silence everything before JawForward and after
            # MouthUpperUpRight (non-mouth channels).
            # NOTE(review): the 27-wide branch performs no zeroing at all —
            # confirm that is intentional.
            input[:,:ARKitBlendShapeLocation.JawForward.value-offset] = 0
            input[:,ARKitBlendShapeLocation.MouthUpperUpRight.value-offset + 1:] = 0
        
        if 'HDTF' in dataset_type:
            # HDTF predictions are used as-is.
            pass
        elif 'BEAT' in dataset_type or 'Beat' in dataset_type:
            # Hand-tuned per-channel adjustments for the BEAT dataset.
            amplify = 3.5
            shrink = 0.5
            stay = 1.0
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthPucker.value-offset,0.0,amplify)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthPressLeft.value-offset,0.0,amplify)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthPressRight.value-offset,0.0,amplify)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthRollLower.value-offset,0.0,amplify)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthRollUpper.value-offset,0.0,amplify)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthDimpleLeft.value-offset,0.0,amplify)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthDimpleRight.value-offset,0.0,amplify)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthFrownLeft.value-offset,0.0,amplify)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthFrownRight.value-offset,0.0,amplify)
            
            # Damp the smile channels.
            input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthSmileLeft.value-offset,0.0,shrink)
            input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthSmileRight.value-offset,0.0,shrink)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthLowerDownLeft.value-offset,0.0,shrink)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthLowerDownRight.value-offset,0.0,shrink)
            
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthUpperUpLeft.value-offset,0.0,shrink)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthUpperUpRight.value-offset,0.0,shrink)
        
            # Boost mouth-close / jaw-open articulation; JawOpen additionally
            # drops a 0.25 floor before amplification.
            input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthClose.value-offset,0.0,1.25)    
            input = cutLowAndMul(input,ARKitBlendShapeLocation.JawOpen.value-offset,0.25,1.25)
            input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthFunnel.value-offset,0.0,0.5)
            
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthShrugLower.value-offset,0.0,shrink)
            # input = cutLowAndMul(input,ARKitBlendShapeLocation.MouthShrugUpper.value-offset,0.0,shrink)
        
        
        
        # Clamp to the valid blendshape range [0, 1].
        if isinstance(input, torch.Tensor):
            input = torch.where(input > 1.0, 1.0, input)     
            
        elif isinstance(input, np.ndarray):
            input = np.where(input > 1.0, 1.0, input)
            
        if isinstance(input, torch.Tensor):
            input = torch.where(input < 0,0,input)     
            
        elif isinstance(input, np.ndarray):
            input = np.where(input < 0,0,input)
        return input
    else:
        return input


class BSGenerator():
    """Synthesizes ARKit blendshape weights from a wav file using the
    checkpointed model built by create_model()."""

    def __init__(self, ckp):
        """Load the checkpoint at path `ckp` and prepare the model for
        inference on `infer_device` (from global_config)."""
        self.smooth = True
        self.pad = False

        # restore checkpoint model
        print("=> loading checkpoint '{}'".format(ckp))
        checkpoint = torch.load(ckp, map_location=torch.device(infer_device))
        print("model epoch {} loss: {}".format(checkpoint['epoch'], checkpoint['eval_loss']))

        model, _ = create_model()
        model.load_state_dict(checkpoint['state_dict'])
        model = model.to(infer_device)

        # inference only
        model.eval()
        self.model = model

    def pad_blendshape(self, blendshape):
        # Pad 16 zero frames at head and tail (frame axis only).
        return np.pad(blendshape, [(16, 16), (0, 0)], mode='constant', constant_values=0.0)

    def computeBS(self, wav_file):
        """Synthesize blendshapes for `wav_file` and return the Unity JSON
        payload produced by tensorToUnityBSJson."""
        start_time = time.time()

        ## process audio
        audio = audio_decoder(wav_file, max_duration=10000)
        if audio is None:
            # Decoding failed — resample to 16 kHz and retry once.
            refn = wav_file + '.re.wav'
            resample4wavs(wav_file, refn, 16000)
            audio = audio_decoder(refn)

        feature = torch.from_numpy(audio)
        # Dummy target so TensorDataset yields (feature, target) pairs.
        target = torch.from_numpy(np.zeros((feature.shape[0], 51)))

        ## build dataset
        test_loader = DataLoader(TensorDataset(feature, target),
                                 batch_size=100, shuffle=False, num_workers=0)

        # BUGFIX/cleanup: torch.no_grad() replaces the long-deprecated
        # Variable(..., volatile=True) API (volatile has been a no-op since
        # torch 0.4, so gradients were silently tracked).  The unused
        # target_var / bs_var were removed, and outputs are collected in a
        # list with one final cat instead of a quadratic incremental cat.
        outputs = []
        with torch.no_grad():
            for input, _ in test_loader:
                input_var = input.float().to(infer_device)
                output = self.model(input_var)
                outputs.append(output.detach().cpu())
        output_cat = torch.cat(outputs, 0)
        if torch.cuda.is_available():
            # Release cached GPU memory once per call, not per batch.
            torch.cuda.empty_cache()

        output_cat = output_cat.numpy()

        if self.smooth:
            # Savitzky-Golay smoothing, applied per blendshape channel.
            win = 9; polyorder = 2
            for i in range(bs_num):
                output_cat[:, i] = savgol_filter(output_cat[:, i], win, polyorder, mode='nearest')

        # pad blendshape
        if self.pad:
            output_cat = self.pad_blendshape(output_cat)

        # count time for synthesis
        past_time = time.time() - start_time
        print(f"Synthesis finished in {past_time:.4f} sec! filename: {wav_file}")

        # Embed the predicted mouth-region channels into the full 51-dim
        # ARKit vector (enum values are 1-based, hence the -1).
        output_51 = np.zeros([output_cat.shape[0], 51])
        output_51[:, ARKitBlendShapeLocation.JawForward.value - 1: ARKitBlendShapeLocation.MouthUpperUpRight.value] = output_cat

        output_51 = bs_postprocess(output_51)

        bsJson = tensorToUnityBSJson(output_51, bs_scale, unityBSDelay)

        return bsJson

class BSGeneratorET():
    """Blendshape generator backed by the EmoTalk model.

    Loads ./checkpoint/et/EmoTalk.pth when present and runs inference in
    30-second chunks to bound memory usage.
    """

    def __init__(self):
        self.model = EmoTalk()
        model_path = "./checkpoint/et/EmoTalk.pth"
        if os.path.exists(model_path):
            # strict=False: the checkpoint may omit some submodules.
            self.model.load_state_dict(torch.load(model_path, map_location=torch.device(infer_device)), strict=False)
        # BUGFIX: computeBS referenced self.device, which was never assigned
        # anywhere (AttributeError at inference time).  Store it here.
        self.device = torch.device(infer_device)
        self.model = self.model.to(self.device)
        self.model.eval()

    def computeBS(self, wav_file):
        """Synthesize blendshapes for `wav_file` and return the Unity JSON
        payload produced by tensorToUnityBSJson."""
        start_time = time.time()

        speech_array, sr = librosa.load(wav_file, sr=16000)
        total_samples = speech_array.shape[0]
        audio_duration = total_samples / sr

        # Process the waveform in 30-second chunks.
        step_samples = 30 * sr
        step_count = int(np.ceil(total_samples / step_samples))

        output = None
        with torch.no_grad():  # inference only — don't build a graph
            for step in range(step_count):
                start_pos = step * step_samples
                end_pos = min((step + 1) * step_samples, total_samples)

                audio = torch.FloatTensor(speech_array[start_pos:end_pos]).unsqueeze(0).to(self.device)
                level = torch.tensor([1]).to(self.device)
                person = torch.tensor([0]).to(self.device)
                prediction = self.model.predict(audio, level, person)
                prediction = prediction.squeeze().detach().cpu().numpy()
                # Drop the trailing channel to match the 51-dim ARKit layout
                # — presumably the tongue channel; TODO confirm.
                prediction = prediction[:, :-1]

                if output is None:
                    output = prediction
                else:
                    output = np.concatenate([output, prediction], axis=0)

        # count time for synthesis
        past_time = time.time() - start_time
        print(f"Synthesis finished in {past_time:.4f} sec! filename: {wav_file} audioDuration: {audio_duration}")

        output_51 = bs_postprocess(output)

        bsJson = tensorToUnityBSJson(output_51, bs_scale, unityBSDelay)

        return bsJson
 

import sys
sys.path.append(os.path.join(os.path.dirname(__file__),'pipeline'))
import pipeline.train as pl
class BSGeneratorV2():
    """Blendshape generator driven by the pipeline-trained model, operating
    directly on raw waveform windows (one centered window per output frame).
    """

    def __init__(self):
        model = pl.model
        # BUGFIX: the device used to be a local variable here while computeBS
        # read the module-level `infer_device` (from global_config) — the two
        # could disagree.  Keep it on self so both stages agree.
        self.infer_device = pl.infer_device

        self.smooth = True
        self.pad = False

        # Number of mouth-region channels the model predicts.
        self.n_bs = ARKitBlendShapeLocation.MouthUpperUpRight.value - ARKitBlendShapeLocation.JawForward.value + 1

        ckp = pl.ckp
        checkpoint = torch.load(ckp, map_location=self.infer_device)
        model.load_state_dict(checkpoint['state_dict'])
        model.to(self.infer_device)
        self.model = model

        self.win_length = pl.win_length  # window length in seconds
        self.fps = pl.fps                # output frame rate

    def computeBS(self, wav_file):
        """Synthesize 51-dim blendshape frames for `wav_file` and return the
        Unity JSON payload."""
        start_time = time.time()

        waveform, sr = torchaudio.load(wav_file)
        wavedata = waveform[0]

        # Half a window of zeros in front and a full window behind, so every
        # frame's analysis window fits inside the padded signal.
        half_window = int(sr * self.win_length / 2)
        pad_wavedata = torch.concatenate([torch.zeros(half_window), wavedata, torch.zeros(2 * half_window)])

        duration = wavedata.shape[0] / sr
        total_frame_count = int(duration * self.fps)
        batch_size = 1024

        pending = []  # windows awaiting a model call
        chunks = []   # model output batches

        def _flush():
            # Run the model on all pending windows as one batch.
            if pending:
                batch = torch.stack(pending).to(self.infer_device)
                chunks.append(self.model(batch).detach().cpu())
                pending.clear()

        # BUGFIX/cleanup: inference now runs under no_grad (it previously
        # built autograd graphs across the whole file); tensor `== None`
        # comparisons replaced with `is None`-free list accumulation (stack
        # once per batch instead of quadratic incremental concatenate); a
        # pending partial batch is no longer dropped when the loop exits via
        # the early break.
        with torch.no_grad():
            for idx in range(total_frame_count):
                start = idx * sr // self.fps
                end = start + half_window * 2
                # check if the audio is long enough
                if end > len(pad_wavedata):
                    print(f"Audio is not long enough to get fragment: {end} > {len(pad_wavedata)}")
                    break
                pending.append(pad_wavedata[start:end])
                if len(pending) == batch_size:
                    _flush()
            _flush()

        if not chunks:
            # Previously this crashed with AttributeError on None.
            raise ValueError(f"No frames could be synthesized from {wav_file}")
        bs = torch.cat(chunks, 0).numpy()

        if self.smooth:
            # Savitzky-Golay smoothing per channel (was hard-coded to 27).
            win = 9; polyorder = 2
            for i in range(bs.shape[1]):
                bs[:, i] = savgol_filter(bs[:, i], win, polyorder, mode='nearest')

        # Embed the mouth-region channels into the full 51-dim ARKit vector.
        output_51 = np.zeros([bs.shape[0], 51])
        output_51[:, ARKitBlendShapeLocation.JawForward.value - 1: ARKitBlendShapeLocation.MouthUpperUpRight.value] = bs

        bsJson = tensorToUnityBSJson(output_51, 1.2, 0.0)

        # count time for synthesis
        past_time = time.time() - start_time
        print(f"V2 Synthesis finished in {past_time:.4f} sec! filename: {wav_file}")

        return bsJson



from pipeline.utils.audio.extraction.extract_features import extract_audio_features
class BSGeneratorV3():
    """Variant of BSGeneratorV2 that was intended to consume pre-extracted
    audio features; the feature path was never finished, so inference runs
    on raw waveform windows exactly like V2."""

    def __init__(self):
        model = pl.model
        # BUGFIX: keep the device on self so computeBS uses the same device
        # the model was loaded on (it previously read the module-level
        # `infer_device`).
        self.infer_device = pl.infer_device

        self.smooth = True
        self.pad = False

        # Number of mouth-region channels the model predicts.
        self.n_bs = ARKitBlendShapeLocation.MouthUpperUpRight.value - ARKitBlendShapeLocation.JawForward.value + 1

        ckp = pl.ckp
        # map_location keeps CPU-only hosts working with GPU checkpoints.
        checkpoint = torch.load(ckp, map_location=self.infer_device)
        model.load_state_dict(checkpoint['state_dict'])
        model.to(self.infer_device)
        self.model = model

        self.win_length = pl.win_length  # window length in seconds
        self.fps = pl.fps                # output frame rate

    def computeBS(self, wav_file):
        """Synthesize 51-dim blendshape frames for `wav_file` and return the
        Unity JSON payload."""
        start_time = time.time()

        waveform, sr = torchaudio.load(wav_file)
        wavedata = waveform[0]

        # BUGFIX: removed a half-finished feature-windowing block that read
        # the undefined name `start` (NameError on every call) and computed
        # an extract_audio_features() result that was never used.
        # TODO: wire audio features back in if this variant is completed.

        # Half a window of zeros in front and a full window behind, so every
        # frame's analysis window fits inside the padded signal.
        half_window = int(sr * self.win_length / 2)
        pad_wavedata = torch.concatenate([torch.zeros(half_window), wavedata, torch.zeros(2 * half_window)])

        duration = wavedata.shape[0] / sr
        total_frame_count = int(duration * self.fps)
        batch_size = 1024

        pending = []  # windows awaiting a model call
        chunks = []   # model output batches

        def _flush():
            # Run the model on all pending windows as one batch.
            if pending:
                batch = torch.stack(pending).to(self.infer_device)
                chunks.append(self.model(batch).detach().cpu())
                pending.clear()

        # Inference under no_grad; windows batched via stack instead of
        # quadratic incremental concatenate; partial final batch preserved.
        with torch.no_grad():
            for idx in range(total_frame_count):
                start = idx * sr // self.fps
                end = start + half_window * 2
                # check if the audio is long enough
                if end > len(pad_wavedata):
                    print(f"Audio is not long enough to get fragment: {end} > {len(pad_wavedata)}")
                    break
                pending.append(pad_wavedata[start:end])
                if len(pending) == batch_size:
                    _flush()
            _flush()

        if not chunks:
            # Previously this crashed with AttributeError on None.
            raise ValueError(f"No frames could be synthesized from {wav_file}")
        bs = torch.cat(chunks, 0).numpy()

        if self.smooth:
            # Savitzky-Golay smoothing per channel (was hard-coded to 27).
            win = 9; polyorder = 2
            for i in range(bs.shape[1]):
                bs[:, i] = savgol_filter(bs[:, i], win, polyorder, mode='nearest')

        # Embed the mouth-region channels into the full 51-dim ARKit vector.
        output_51 = np.zeros([bs.shape[0], 51])
        output_51[:, ARKitBlendShapeLocation.JawForward.value - 1: ARKitBlendShapeLocation.MouthUpperUpRight.value] = bs

        bsJson = tensorToUnityBSJson(output_51, 1.2, 0.0)

        # count time for synthesis
        past_time = time.time() - start_time
        print(f"V2 Synthesis finished in {past_time:.4f} sec! filename: {wav_file}")

        return bsJson

            
def Gen(files):
    """Synthesize each wav in `files` with BSGenerator and write one
    .bs_weight.json result per clip into ./synthesis.

    Output filenames encode the model/feature/dataset configuration from
    global_config so runs with different settings don't overwrite each other.
    """
    outDir = './synthesis'
    os.makedirs(outDir, exist_ok=True)
    generator = BSGenerator(ckp=ckp)

    for wavfile in files:
        # Clip name: basename up to the first dot (paths use '/').
        fn = wavfile.split('/')[-1].split('.')[0]

        data = generator.computeBS(wav_file=wavfile)
        jstr = json.dumps(asdict(data))

        jfilePath = f'{outDir}/generator_{fn}_{model_name}_{feature_type}_{dataset_type}_{checkpoint_ephoch}_{labelType}_{lossType}_{infer_device}.bs_weight.json'

        # BUGFIX: context manager guarantees the handle is closed even if
        # the write raises (the original leaked it on failure).
        with open(jfilePath, 'w') as jf:
            jf.write(jstr)
def GenET(files):
    """Synthesize each wav in `files` with the EmoTalk generator and write
    one .bs_weight.json result per clip into ./synthesis."""
    generator = BSGeneratorET()

    for wavfile in files:
        # Clip name: basename up to the first dot (paths use '/').
        fn = wavfile.split('/')[-1].split('.')[0]

        data = generator.computeBS(wav_file=wavfile)
        if data is not None:
            jstr = json.dumps(asdict(data))
            jfilePath = f'./synthesis/generatoret_{fn}.bs_weight.json'

            # BUGFIX: context manager guarantees the handle is closed even
            # if the write raises (the original leaked it on failure).
            with open(jfilePath, 'w') as jf:
                jf.write(jstr)
if __name__ == '__main__':
    # Test clips to synthesize; edit this list to run other files.
    files = [
        'data/test/18_daiki_0_8_8.wav',
        'data/test/1_wayne_0_1_1.wav',
        'data/test/woman.wav',
        'data/test/resultAudio.wav',
    ]
    # Previously used clips, kept for reference:
    #   'data/test/recordClip.wav'
    #   'data/test/WRA_JoniErnst0_000.wav'
    #   'data/test/22_luqi_2_87_94.wav'
    #   'D:/Temp/1/1/audio0.wav'

    Gen(files)
    # GenET(files)