import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__),'..'))
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import shutil
import time
from datetime import datetime

from pipeline.dataset import WavBlendshapeDataset,DataPreprocessor
from pipeline.feature_dataset.dataset import LazyAudioFacialDataset
from models import NvidiaNet, LSTMNvidiaNet, FullyLSTM
# from models_testae import *
from global_config import *
from preprocess import bs_preprocess
from audio2face import *
from lstm import *

# gpu setting
gpu_id = 0
# Restrict visible GPUs before any CUDA context is created. torch is already
# imported above, but it initialises CUDA lazily, so this still takes effect
# as long as no CUDA call has been made yet.
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)

# Whether DataPreprocessor shuffles the source wav list when building pickles.
need_shuffle = False
# Regenerate the cached train/val pickles on every run (forces reprocessing).
generate_train_pkl = True
generate_val_pkl = True

# Running best validation loss; effectively +infinity at start.
best_loss = 10000000
# Resume training from the checkpoint file `ckp` defined below.
resume_train = False

learning_rate = 0.01
# Per-epoch LR decay factor (only used by the commented-out manual schedule).
learning_rate_scale = 0.9975
batch_size = 1024 * 8
# batch_size = 512
epochs = 800
# Run validation (and best-model check) every `eval_step` epochs.
eval_step = 1
# Write a numbered checkpoint every `save_step` epochs.
save_step = 25

# Animation frame rate and audio sample rate the datasets are built against.
fps = 60
sr = 16000
# Audio window length in seconds fed to the model per frame.
win_length = 0.52
# win_length = 2.14

# Number of predicted blendshape channels: the contiguous ARKit range from
# JawForward up to and including MouthUpperUpRight.
# NOTE(review): ARKitBlendShapeLocation comes from a star import above
# (presumably global_config or audio2face) — confirm the enum ordering there.
n_bs = ARKitBlendShapeLocation.MouthUpperUpRight.value - ARKitBlendShapeLocation.JawForward.value + 1

# Training dataset selection (Windows paths; adjust per machine).
train_data_name = 'BEAT/1'
# train_data_name = '3D-ETF/3D-ETF/all-1'
train_data_dir = f"D:/Beat/{train_data_name}"

# train_data_name = 'Neurosync_Audio2Face_Dataset/Male'
# train_data_dir = f"D:/DevelopProj/Yuji/YProject/YBigModelProject/DownloadRoot/datasets/AnimaVR/{train_data_name}"


# Limit how many wav files are used and where in the list to start.
train_max_wav_count = 5
train_start_count = 0
# Slashes are not legal in filenames, so flatten the dataset name for saving.
train_data_name_rep = train_data_name.replace('/','-')
train_data_name_for_save = f'{train_data_name_rep}-dlen{train_max_wav_count}'

# Validation dataset selection (same scheme as training above).
val_data_name = 'BEAT/1'
# val_data_name = '3D-ETF/3D-ETF/all-1'
val_data_dir = f"D:/Beat/{val_data_name}"

# val_data_name = 'Neurosync_Audio2Face_Dataset/Male'
# val_data_dir = f"D:/DevelopProj/Yuji/YProject/YBigModelProject/DownloadRoot/datasets/AnimaVR/{val_data_name}"

val_max_wav_count = 1
val_start_count = 0
val_data_name_rep = val_data_name.replace('/','-')
val_data_name_for_save =  f'{val_data_name_rep}-dlen{val_max_wav_count}'


# Model selection — exactly one of the alternatives below should be active.
# model = Audio2BS(n_bs)
# model = MFCCAudio2BSLSTM(n_bs)
# model = LFCCAudio2BSLSTM(n_bs)
model = MelSpectrogramAudio2BSLSTM(n_bs)
# model = MFCCAudio2BSNvidiaNet(n_bs)
# model = LSTMNvidiaNetX(61,16,256)
# model = AudioToBlendshape(256,n_bs)

# Loss weights: reconstruction term and velocity (temporal smoothness) term.
k_rec = 2.0
k_vel = 2.0
# loss_func = FaceFormerLoss(k_rec, k_vel, 3)
loss_func = FaceFormerLoss(k_rec, k_vel, 1)
# loss_func = nn.MSELoss()

# Checkpoint directory is derived from dataset/model/loss/hyper-parameters so
# different runs never overwrite each other.
# NOTE(review): `projroot` is expected to come from a star import above.
dataroot = os.path.join(projroot,'data/pipeline')
checkpoint_name = f'{train_data_name_for_save}_{type(model).__name__}_{str(loss_func)}_wl{win_length}_bs{batch_size}'
checkpoint_path = os.path.join(dataroot,checkpoint_name)
# Checkpoint file used when resume_train is True.
ckp = os.path.join(checkpoint_path,'checkpoint-epoch400.pth.tar')
# ckp = os.path.join(checkpoint_path,'model_best.pth.tar')

# Device used by inference scripts importing this module's config.
infer_device = 'cpu'

def _train_one_epoch(net, loader, optimizer, device):
    """Run one training epoch over *loader* and return the sample-weighted mean loss.

    Uses the module-level ``loss_func``. Gradients are computed and the
    optimizer is stepped once per batch.
    """
    net.train()
    total_loss = 0.0
    sample_count = 0
    for inputs, targets in loader:
        inputs = inputs.to(device)
        targets = targets.to(device)

        optimizer.zero_grad()
        outputs = net(inputs)
        loss = loss_func(outputs, targets)
        loss.backward()
        optimizer.step()

        batch = inputs.shape[0]
        # Weight by batch size so the epoch mean is exact even when the last
        # batch is smaller than the rest.
        total_loss += loss.item() * batch
        sample_count += batch
    # Guard against an empty loader (previously a ZeroDivisionError).
    return total_loss / max(sample_count, 1)


def _evaluate(net, loader, device):
    """Return the sample-weighted mean loss over *loader* without gradients."""
    net.eval()
    total_loss = 0.0
    sample_count = 0
    with torch.no_grad():
        for inputs, targets in loader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            outputs = net(inputs)
            batch = inputs.shape[0]
            total_loss += loss_func(outputs, targets).item() * batch
            sample_count += batch
    return total_loss / max(sample_count, 1)


def main():
    """Train the module-level ``model`` on the configured dataset.

    Reads every hyper-parameter and path from the module-level configuration
    above; optionally resumes from ``ckp``; writes ``model_best.pth.tar`` and
    periodic ``checkpoint-epoch*.pth.tar`` files into ``checkpoint_path``.
    """
    global best_loss
    global model

    os.makedirs(checkpoint_path, exist_ok=True)

    # (Re)build the preprocessed dataset pickles when requested.
    train_data_pkl = os.path.join(dataroot, f'{train_data_name_for_save}_train.pkl')
    val_data_pkl = os.path.join(dataroot, f'{val_data_name_for_save}_val.pkl')
    if generate_train_pkl:
        trainDataPreprocessor = DataPreprocessor(train_data_dir, train_max_wav_count, need_shuffle, train_start_count)
        trainDataPreprocessor.Save(train_data_pkl)
    if generate_val_pkl:
        valDataPreprocessor = DataPreprocessor(val_data_dir, val_max_wav_count, need_shuffle, val_start_count)
        valDataPreprocessor.Save(val_data_pkl)

    # The Neurosync dataset ships in a different layout and uses the lazy loader.
    if 'Neurosync' in train_data_name:
        trainDataSet = LazyAudioFacialDataset(train_data_dir, win_length, fps, train_max_wav_count, force_reprocess=generate_train_pkl)
        valDataSet = LazyAudioFacialDataset(val_data_dir, win_length, fps, val_max_wav_count, force_reprocess=generate_val_pkl)
    else:
        trainDataSet = WavBlendshapeDataset(train_data_pkl, win_length, fps, sr)
        valDataSet = WavBlendshapeDataset(val_data_pkl, win_length, fps, sr)

    # Fix: choose the device once and use it everywhere. The original called
    # .cuda() unconditionally on every batch, crashing on CPU-only machines.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    checkpoint = None
    if resume_train:
        # map_location keeps the load working when the checkpoint was written
        # on a GPU that is not available now.
        checkpoint = torch.load(ckp, map_location='cpu')
        startEpoch = checkpoint['epoch']
        print("model epoch {} loss: {}".format(checkpoint['epoch'], checkpoint['eval_loss']))
        model.load_state_dict(checkpoint['state_dict'])
        # Fix: previously best_loss kept its huge initial value after a resume,
        # so the first evaluation always overwrote model_best.pth.tar, possibly
        # with a worse model than the one already saved.
        best_loss = min(best_loss, checkpoint['eval_loss'])
    else:
        startEpoch = 0

    # NOTE(review): shuffle=False for the training loader looks deliberate
    # (sequence data) but is worth confirming — shuffling usually helps.
    train_loader = torch.utils.data.DataLoader(trainDataSet, batch_size=batch_size, shuffle=False, num_workers=0)
    val_loader = torch.utils.data.DataLoader(valDataSet, batch_size=batch_size, shuffle=False, num_workers=0)

    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    # Restore optimizer state (Adam moments, LR) when the checkpoint has it;
    # older checkpoints without the key still resume fine.
    if checkpoint is not None and 'optimizer' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5)

    print('------------\n Training begin at %s' % datetime.now())

    for epoch in range(startEpoch, epochs):
        start_time = time.time()

        train_loss = _train_one_epoch(model, train_loader, optimizer, device)
        eval_loss = _evaluate(model, val_loader, device)

        past_time = time.time() - start_time

        # NOTE(review): the plateau scheduler is driven by the TRAINING loss,
        # matching the original behaviour; eval_loss may have been intended.
        if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step(train_loss)
        else:
            scheduler.step()

        lr = optimizer.param_groups[0]['lr']
        print('epoch: {:03} | train_loss: {:.6f} | eval_loss: {:.6f} | {:.4f} sec/epoch | lr:{}\r'
            .format(epoch + 1, train_loss, eval_loss, past_time, lr))

        # Save the best-so-far model on the validation loss.
        if (epoch + 1) % eval_step == 0:
            is_best = eval_loss < best_loss
            best_loss = min(eval_loss, best_loss)
            if is_best:
                torch.save({
                        'epoch': epoch + 1,
                        'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'eval_loss': best_loss,
                    }, os.path.join(checkpoint_path, 'model_best.pth.tar'))

        # Periodic numbered checkpoint for later inspection / resume.
        if (epoch + 1) % save_step == 0:
            torch.save({
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'eval_loss': eval_loss,
                }, os.path.join(checkpoint_path, f'checkpoint-epoch{epoch+1}.pth.tar'))

    print('Training finished at %s' % datetime.now())

# def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
#     torch.save(state, checkpoint_path+filename)
#     if is_best:
#         shutil.copyfile(checkpoint_path+filename, checkpoint_path+'model_best.pth.tar')

# Script entry point: run the full training pipeline.
if __name__ == '__main__':
    main()
