import torch
import time
import argparse
import os
import numpy as np
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
from scipy.signal import savgol_filter

import scipy.io.wavfile as wav

from models import FullyLSTM, NvidiaNet
# from models_testae import *
from models_lstm import *
from txt2json import *
from models import * 
import librosa
print(f'librosa: {librosa.__version__}')
from audio_encode import *
from global_config import * 

# parser arguments
def _str2bool(value):
    """Parse a boolean CLI flag value.

    argparse's ``type=bool`` is a trap: ``bool('False')`` is True, so any
    non-empty string used to enable the flag. This accepts the usual
    spellings explicitly and rejects anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError(f'expected a boolean, got {value!r}')


parser = argparse.ArgumentParser(description='Synthesize wave to blendshape')
parser.add_argument('--wav', type=str, help='wav to synthesize')
parser.add_argument('--smooth', type=_str2bool, default=True)
parser.add_argument('--pad', type=_str2bool, default=True)
parser.add_argument('--control', type=_str2bool, default=False)
parser.add_argument('--wav_list', action='append', default=None, type=str, required=False)

args = parser.parse_args()
print(args)

# Resolve output paths relative to this script so it works from any cwd.
projroot = os.path.join(os.path.dirname(__file__), '.')

result_path = os.path.join(projroot, './synthesis')

# Project-provided audio feature decoder plus the leading/trailing frame
# counts later used by pad_blendshape().
audio_decoder, start_videoframe, end_videoframe = create_audio_decoder()

# infer_device comes from global_config (e.g. 'cpu' or 'cuda') — assumed; confirm there.
device = torch.device(infer_device)

def main():
    """Load the checkpointed model and synthesize blendshape weights for
    every requested wav file, writing the result next to ``result_path``.

    Reads module-level config (args, ckp, device, result_path, ...) and
    updates the module-level ``result_file`` as a side effect.
    """
    global result_file

    model, _ = create_model()

    # Restore checkpoint weights; map_location lets a GPU-trained
    # checkpoint load onto the configured inference device (e.g. CPU).
    print("=> loading checkpoint '{}'".format(ckp))
    checkpoint = torch.load(ckp, map_location=device)
    print("model epoch {} loss: {}".format(checkpoint['epoch'], checkpoint['eval_loss']))

    model.load_state_dict(checkpoint['state_dict'])
    model = model.to(device)

    # Inference mode: freezes dropout / batch-norm statistics.
    model.eval()

    # Collect every wav to synthesize: single --wav plus any --wav_list entries.
    files = []
    if args.wav is not None:
        files.append(args.wav)
    if args.wav_list is not None:
        files.extend(args.wav_list)

    for wav_file in files:
        wav_name = os.path.basename(wav_file).split('.')[0]
        result_file = os.path.join(
            result_path,
            f'{feature_type}-{dataset_type}-{model_name}-{wav_name}-{checkpoint_ephoch}-{infer_device}.txt')
        os.makedirs(os.path.dirname(result_file), exist_ok=True)

        start_time = time.time()

        # Decode audio features; if decoding fails, resample to 16 kHz and retry.
        audio = audio_decoder(wav_file, max_duration=10000)
        if audio is None:
            refn = wav_file + '.re.wav'
            resample4wavs(wav_file, refn, 16000)
            audio = audio_decoder(refn)

        feature = torch.from_numpy(audio)
        # Dummy targets: TensorDataset needs a paired tensor; 51 is the
        # blendshape dimension. The values are never used for inference.
        target = torch.from_numpy(np.zeros((feature.shape[0], 51)))

        test_loader = DataLoader(TensorDataset(feature, target),
                                 batch_size=100, shuffle=False, num_workers=0)

        # Run inference batch-by-batch with autograd disabled. This replaces
        # the deprecated Variable(..., volatile=True) idiom, which modern
        # torch ignores (so the graph was being built and memory grew).
        # Batch outputs are collected and concatenated once instead of
        # repeated torch.cat calls (which is quadratic).
        outputs = []
        with torch.no_grad():
            for batch_input, _ in test_loader:
                batch_input = batch_input.float().to(device)
                outputs.append(model(batch_input))
        output_cat = torch.cat(outputs, 0).cpu().numpy()

        if args.smooth:
            # Savitzky-Golay smoothing applied independently per channel.
            win = 9
            polyorder = 3
            for ch in range(bs_num):
                output_cat[:, ch] = savgol_filter(output_cat[:, ch], win,
                                                  polyorder, mode='nearest')

        # Pad leading/trailing frames so output aligns with the video timeline.
        if args.pad:
            output_cat = pad_blendshape(output_cat)

        # count time for synthesis
        past_time = time.time() - start_time
        print("Synthesis finished in {:.4f} sec! Saved in {}".format(past_time, result_file))

        directGen = True
        if directGen:
            # Serialize the blendshape weights straight to the Unity JSON format.
            bsJson = tensorToUnityBSJson(output_cat, bs_scale)

            jstr = json.dumps(asdict(bsJson))
            print(jstr)

            jfilePath = result_file + '.bs_weight.json'
            with open(jfilePath, 'w') as jf:
                jf.write(jstr)
        else:
            with open(result_file, 'wb') as f:
                np.savetxt(f, output_cat, fmt='%.6f')

            txtToUnityBSJson(result_file, bs_scale)

def pad_blendshape(blendshape, start=None, end=None):
    """Zero-pad a (frames, channels) blendshape array along the time axis.

    Prepends ``start`` and appends ``end`` all-zero frames so the output
    aligns with the video timeline. Both default to the module-level
    ``start_videoframe`` / ``end_videoframe`` produced by
    ``create_audio_decoder()``, keeping the original call signature working.

    Args:
        blendshape: 2-D array of per-frame blendshape weights.
        start: number of zero frames to prepend (default: start_videoframe).
        end: number of zero frames to append (default: end_videoframe).

    Returns:
        A new, padded numpy array; channel count is unchanged.
    """
    if start is None:
        start = start_videoframe
    if end is None:
        end = end_videoframe
    return np.pad(blendshape, [(start, end), (0, 0)],
                  mode='constant', constant_values=0.0)


# Script entry point: run synthesis only when executed directly, not on import.
if __name__ == '__main__':
    main()
