import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import os
import shutil
import time
from datetime import datetime

from dataset import BlendshapeDataset
from models import NvidiaNet, LSTMNvidiaNet, FullyLSTM
# from models_testae import *
from global_config import *
from preprocess import bs_preprocess

# ---------------------------------------------------------------------------
# GPU selection: restrict CUDA to a single visible device so every later
# `.cuda()` / `.to(device)` call targets this card.
gpu_id = 0
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)


# ---------------------------------------------------------------------------
# Dataset file layout (all names come from `global_config` via star import).
# Only the 37-blendshape dataset uses the `*_data.npy` / `*_label.npy`
# naming; the 51-blendshape case and the fallback were byte-identical in the
# original if/elif/else, so they collapse into a single else branch.
train_dir = combine_path
if bs_num == 37:
    train_data_file = os.path.join(train_dir, 'train_data.npy')
    train_label_file = os.path.join(train_dir, 'train_label.npy')
    val_data_file = os.path.join(train_dir, 'val_data.npy')
    val_label_file = os.path.join(train_dir, 'val_label.npy')
else:
    train_data_file = os.path.join(train_dir, 'train.feature.npy')
    train_label_file = os.path.join(train_dir, 'train.bs.npy')
    val_data_file = os.path.join(train_dir, 'val.feature.npy')
    val_label_file = os.path.join(train_dir, 'val.bs.npy')


# Checkpoints are namespaced by every experiment axis so different runs never
# overwrite each other.  All path components come from global_config.
checkpoint_path = os.path.join(projroot, checkpoint_base_path, model_name,
                               feature_type, dataset_type, labelType, lossType)
os.makedirs(checkpoint_path, exist_ok=True)


# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
    """VAE objective: mean-squared reconstruction error plus a
    down-weighted KL divergence term.

    Args:
        recon_x: reconstruction produced by the decoder.
        x: ground-truth target tensor (same shape as ``recon_x``).
        mu: latent means from the encoder.
        logvar: latent log-variances from the encoder.

    Returns:
        Scalar tensor ``MSE + 0.01 * KLD``.

    NOTE(review): the two terms use different reductions — the MSE is
    averaged over elements (``F.mse_loss`` default) while the KLD is summed
    over all elements and the batch; the 0.01 factor keeps the summed KLD
    from dominating.
    """
    reconstruction = F.mse_loss(recon_x, x)

    # Closed-form KL divergence between N(mu, sigma^2) and N(0, 1):
    #   -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    # See Appendix B of Kingma & Welling, "Auto-Encoding Variational
    # Bayes", ICLR 2014 — https://arxiv.org/abs/1312.6114
    kld = 0.5 * torch.sum(logvar.exp() + mu.pow(2) - logvar - 1)

    return reconstruction + 0.01 * kld



def main():
    """Train the blendshape-regression model.

    Relies on module-level configuration star-imported from
    ``global_config`` (``batch_size``, ``epochs``, ``learning_rate``,
    ``best_loss``, ``motion_loss``, checkpoint settings, ...) and on
    ``create_model()`` to build the network and its loss function.
    Writes ``model_best.pth.tar`` (lowest validation loss) and periodic
    ``checkpoint-epoch*.pth.tar`` snapshots under ``checkpoint_path``.
    """
    global best_loss
    global learning_rate

    model, lossfunc = create_model()

    # Optionally resume from a previously saved checkpoint `ckp`.
    if resume_train:
        checkpoint = torch.load(ckp)
        startEpoch = checkpoint['epoch']
        print("model epoch {} loss: {}".format(checkpoint['epoch'], checkpoint['eval_loss']))
        model.load_state_dict(checkpoint['state_dict'])
    else:
        startEpoch = 0

    print(model)

    # Run on GPU when available.  The original guarded `model.cuda()` but
    # then called `.cuda()` unconditionally inside the loops, which crashed
    # on CPU-only machines; a single device object fixes both paths.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    # Data loaders (num_workers=0: datasets are in-memory .npy files).
    train_loader = torch.utils.data.DataLoader(
        BlendshapeDataset(feature_file=os.path.join(data_path, train_data_file),
                          target_file=os.path.join(data_path, train_label_file),
                          max_len=max_train_data_len),
        batch_size=batch_size, shuffle=True, num_workers=0)
    val_loader = torch.utils.data.DataLoader(
        BlendshapeDataset(feature_file=os.path.join(data_path, val_data_file),
                          target_file=os.path.join(data_path, val_label_file),
                          max_len=max_val_data_len),
        batch_size=batch_size, shuffle=False, num_workers=0)

    # Optimiser is created after the model has been moved to its device.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5)

    print('------------\n Training begin at %s' % datetime.now())

    for epoch in range(startEpoch, epochs):
        start_time = time.time()

        # ---- training pass ----
        model.train()
        train_loss = 0.
        for features, target in train_loader:
            input_var = features.float().to(device)
            target_var = bs_preprocess(target.float().to(device))

            optimizer.zero_grad()
            output = model(input_var)
            loss = lossfunc(output, target_var, motion_loss=motion_loss)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
        train_loss /= len(train_loader)

        # ---- validation pass ----
        model.eval()
        eval_loss = 0.
        with torch.no_grad():
            for input_val, target_val in val_loader:
                input_val = input_val.float().to(device)
                # BUGFIX: the original ran `bs_preprocess` on the stale
                # training-loop variable `target_var` here and then computed
                # the loss against the raw `target_val`, so validation
                # targets were never preprocessed while training targets
                # were.  Preprocess the validation target itself.
                target_val = bs_preprocess(target_val.float().to(device))

                output = model(input_val)
                loss_val = lossfunc(output, target_val, motion_loss=motion_loss)
                eval_loss += loss_val.item()
            eval_loss /= len(val_loader)

        # Wall-clock time of one full epoch (train + validation).
        past_time = time.time() - start_time

        # ReduceLROnPlateau must be stepped with the monitored metric;
        # any other scheduler is stepped unconditionally once per epoch.
        if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            scheduler.step(train_loss)
        else:
            scheduler.step()

        lr = optimizer.param_groups[0]['lr']
        print('epoch: {:03} | train_loss: {:.6f} | eval_loss: {:.6f} | {:.4f} sec/epoch | lr:{}\r'
              .format(epoch + 1, train_loss, eval_loss, past_time, lr))

        # Keep the best model (by validation loss), checked every eval_step.
        if (epoch + 1) % eval_step == 0:
            is_best = eval_loss < best_loss
            best_loss = min(eval_loss, best_loss)
            if is_best:
                torch.save({
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'eval_loss': best_loss,
                }, os.path.join(checkpoint_path, 'model_best.pth.tar'))

        # Periodic snapshot regardless of quality, every save_step epochs.
        if (epoch + 1) % save_step == 0:
            torch.save({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'eval_loss': eval_loss,
            }, os.path.join(checkpoint_path, f'checkpoint-epoch{epoch+1}.pth.tar'))

    print('Training finished at %s' % datetime.now())

# def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
#     torch.save(state, checkpoint_path+filename)
#     if is_best:
#         shutil.copyfile(checkpoint_path+filename, checkpoint_path+'model_best.pth.tar')

# Script entry point: kick off training when executed directly.
if __name__ == '__main__':
    main()
