import os,sys
import time
import numpy as np
sys.path.append(os.getcwd())

import torch
import torchvision.models as models
from torch.utils.data import DataLoader
import torchvision.models as models

from InitPose.models.pose_cnn_ssm import PoseSSM
from InitPose.models.PoseAttentionNet import PoseAttention
from InitPose.models.pose_cnn_fft import PoseCNN 
from InitPose.lib.dataset.PROPSPoseDataset import PROPSPoseDataset
from InitPose.lib.options.config import Config
from InitPose.lib.utils import reset_seed
from InitPose.lib.opti.optimizer  import  save_on_master,get_optimizer,get_scheduler

from visualizer.base_visualizer import BaseVisualizer as Visualizer
# Enable autograd anomaly detection to localize NaN/inf gradients during backward.
# NOTE(review): this adds significant per-step overhead — consider disabling for long runs.
torch.autograd.set_detect_anomaly(True)
#------------------------------------------------------------------------#cuda
# Restrict the process to GPUs 0 and 2.
# NOTE(review): set after `import torch`; this only takes effect if no CUDA context
# has been initialized yet — verify nothing above touches CUDA before this line.
os.environ['CUDA_VISIBLE_DEVICES'] = '0,2'

#------------------------------------------------------------------------#PARAMS
# Load all run options/hyperparameters from the JSON config file.
opt = Config(config_file="InitPose/lib/options/config.json").get_config()
# Timestamped run name, used downstream for log/checkpoint naming.
opt.run_name = time.strftime("%Y-%m-%d-%H-%M-%S")

#-----------------------------------------------------------------------#set seed
# Fix RNG seeds for reproducibility.
reset_seed(23)

# -----------------------------------------------------------------------#device
# Select the compute device. The original only defined DEVICE when CUDA was
# available, which made every later `.to(DEVICE)` raise NameError on CPU-only
# machines — fall back to CPU explicitly so DEVICE is always bound.
if torch.cuda.is_available():
    print("Good to go!")
    DEVICE = torch.device("cuda:0")
else:
    print("CUDA not available, falling back to CPU")
    DEVICE = torch.device("cpu")
    
#--------------------------------------------------------------------------#DATA
# PROPS pose datasets; dataset root is the current working directory.
train_dataset = PROPSPoseDataset("./","train",opt.obj_id)
val_dataset = PROPSPoseDataset("./", "test",opt.obj_id)

# Placeholders for distributed samplers (this script runs single-process).
train_sampler = None
val_sampler = None

# Shuffle the training set whenever no custom sampler is supplied — the original
# never shuffled (DataLoader defaults to shuffle=False), so SGD saw the samples
# in the same fixed order every epoch. shuffle and sampler are mutually
# exclusive in DataLoader, hence the `train_sampler is None` guard.
train_dataloader = DataLoader(dataset=train_dataset, batch_size=opt.batch_size,
                              num_workers=opt.num_workers, sampler=train_sampler,
                              shuffle=(train_sampler is None))
# Validation runs one sample at a time, unshuffled.
val_dataloader = DataLoader(dataset=val_dataset, batch_size=1,
                            num_workers=opt.num_workers, sampler=val_sampler)

print(f"Dataset sizes: train ({len(train_dataset)}), val ({len(val_dataset)})")

#----------------------------------------------------------------------------#MODEL to options
# Object model point clouds, shared by every architecture below (was built
# identically in all three branches — hoisted once here).
models_pcd = torch.tensor(train_dataset.models_pcd).to(DEVICE, dtype=torch.float32)

if opt.model_name == "CNN":
    # VGG16 (ImageNet weights) as the convolutional backbone.
    vgg16 = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1)
    model = PoseCNN(pretrained_backbone=vgg16,
                    models_pcd=models_pcd,
                    cam_intrinsic=train_dataset.cam_intrinsic,
                    using_fft=opt.using_fft,
                    using_loss_model=opt.using_loss_model).to(DEVICE)
elif opt.model_name == "FFT":
    # NOTE(review): the "FFT" option builds PoseAttention (no pretrained
    # backbone) — confirm this naming is intentional.
    model = PoseAttention(pretrained_backbone=None,
                          models_pcd=models_pcd,
                          cam_intrinsic=train_dataset.cam_intrinsic,
                          using_loss_model=opt.using_loss_model,
                          num_res=opt.num_res,
                          using_normal=opt.using_normal,
                          using_conv=opt.using_conv,
                          LayerNorm_type=opt.LayerNorm_type,
                          rotation_mode=opt.rotation_mode).to(DEVICE)
elif opt.model_name == "SSM":
    vgg16 = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1)
    model = PoseSSM(pretrained_backbone=vgg16,
                    models_pcd=models_pcd,
                    cam_intrinsic=train_dataset.cam_intrinsic,
                    using_fft=opt.using_fft,
                    using_loss_model=opt.using_loss_model).to(DEVICE)
else:
    # Fail fast with a clear message; the original fell through silently and
    # crashed later with a confusing NameError on `model`.
    raise ValueError(f"Unknown model_name: {opt.model_name!r} (expected 'CNN', 'FFT' or 'SSM')")

#-----------------------------------------------------------------------------#number of parameters
print("Model loaded params\n","-----------------------------------------------------#")
# Count only the trainable parameters (those that participate in backprop).
trainable_counts = [p.numel() for p in model.parameters() if p.requires_grad]
n_parameters = sum(trainable_counts)
print('number of params:', n_parameters)
print("--------------------------------------------------------------------------#")
#------------------------------------------------------------------------------# visualizer
# Logging / plotting helper, configured from the run options.
visualizer = Visualizer(opt)

#------------------------------------------------------------------------------#optimizer
# Optimizer and LR scheduler are chosen by name from the config.
optimizer = get_optimizer(opt.optimizer, model)
scheduler = get_scheduler(opt.scheduler, optimizer)

#------------------------------------------------------------------------------#train start
# NOTE(review): val_dataloader is built above but never used — a validation
# pass appears to be missing from this loop.
total_iters = 0                     # total training samples processed across all epochs

for epoch in range(opt.epochs):
    train_loss = []
    train_dataloader.dataset.dataset_type = "train"

    epoch_start_time = time.time()  # timer for entire epoch
    iter_data_time = time.time()    # timer for data loading per iteration

    epoch_iter = 0                  # samples seen this epoch, reset every epoch

    # LR in effect during this epoch (scheduler steps at epoch end).
    current_learning_rate = optimizer.param_groups[0]['lr']

    # Set train mode once per epoch instead of once per batch (identical
    # effect, avoids redundant recursive module flag updates).
    model.train()

    for batch in train_dataloader:
        total_iters += opt.batch_size
        epoch_iter += opt.batch_size

        # Move every tensor in the batch dict onto the training device.
        for item in batch:
            batch[item] = batch[item].to(DEVICE)

        times = {}                     # recording computation time
        iter_start_time = time.time()  # timer for computation per iteration
        times['data'] = iter_start_time - iter_data_time

        # The model returns a dict of per-task losses; keys used below.
        loss_dict = model(batch)

        optimizer.zero_grad()
        # Weighted sum of segmentation, center-map and rotation losses.
        total_loss = opt.a*loss_dict['loss_segmentation'] + \
                     opt.b*loss_dict['loss_centermap'] + \
                     opt.c*loss_dict['loss_R']

        total_loss.backward()
        optimizer.step()
        train_loss.append(total_loss.item())
        times['comp'] = time.time() - iter_start_time

        # total_iters advances in steps of batch_size, so the original exact
        # `% freq == 0` test never fired when freq was not a multiple of
        # batch_size; `< batch_size` triggers once per freq interval
        # regardless of alignment (and is identical when aligned).
        if total_iters % opt.print_freq < opt.batch_size:    # print training losses and log to disk
            visualizer.update_state(epoch, total_iters, times=times)
            visualizer.plot_current_losses(loss_dict)

        if total_iters % opt.save_latest_freq < opt.batch_size:   # cache our latest model
            print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
            save_on_master({
                'model': model.state_dict(),
            }, os.path.join(visualizer.log_path, "model_latest.pth"))

        iter_data_time = time.time()

    if epoch % opt.save_epoch_freq == 0:              # cache our model every <save_epoch_freq> epochs
        print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
        save_on_master({
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'learning_rate': current_learning_rate,  # Save current learning rate
            'epoch': epoch,
        }, os.path.join(visualizer.log_path, "model_%d.pth" % epoch))

    scheduler.step()
    print('End of epoch %d / %d \t  lr %f Time Taken: %d sec' % (epoch, opt.epochs, current_learning_rate,time.time() - epoch_start_time))
    torch.cuda.empty_cache()

