import cv2
import time
import os,sys
from tqdm import tqdm
import numpy as np
from PIL import Image
sys.path.append(os.getcwd())
import itertools

import torch
from torch.utils.data import DataLoader,Subset
import torchvision.models as models
from torchvision.utils import save_image, make_grid

from InitPose.models.pose_cnn_ssm import PoseSSM
from InitPose.models.PoseAttentionNet import PoseAttention
from InitPose.models.pose_cnn_fft import PoseCNN 
from InitPose.lib.dataset.PROPSPoseDataset import PROPSPoseDataset

from InitPose.lib.options.config import Config
from InitPose.lib.metrics.pose_evaluator_lmo import PoseEvaluatorLMO

from visualizer.base_visualizer import BaseVisualizer as Visualizer

# Test script: objective evaluation of the trained model (adapted from DeepAC).
# Metric: ADD (plus rotation/translation errors and mAP).

# os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# When True, evaluate only the first 3 batches as a quick smoke test.
DEBUG = False

if __name__ == "__main__":
    # -----------------------------------------------------------------------# PARAMS
    print("--" * 30)
    # Checkpoint folder of the training run to evaluate; must contain
    # config.json and model_latest.pth (model_0.pth is used as a fallback).
    folder = 'InitPose/checkpoints/Initpose/obj000001/2024-05-04-11-01-48/'

    opt = Config(config_file=f"{folder}/config.json").get_config()

    # -----------------------------------------------------------------------# Device
    # Select the device up front. The original script only assigned DEVICE
    # when CUDA was available, which raised a NameError on CPU-only machines.
    if torch.cuda.is_available():
        print("using GPU!")
        DEVICE = torch.device("cuda:0")
    else:
        print("CUDA not available, using CPU")
        DEVICE = torch.device("cpu")

    # ------------------------------------------------------------------------# DATA
    val_dataset = PROPSPoseDataset("./", "test", opt.obj_id)
    val_dataloader = DataLoader(dataset=val_dataset, batch_size=1,
                                num_workers=opt.num_workers)
    if DEBUG:
        # Truncate the loader so a debug run finishes quickly.
        val_dataloader = itertools.islice(val_dataloader, 3)

    print(f"Dataset sizes: test ({len(val_dataset)})")
    # -----------------------------------------------------------------------# MODEL
    # Object point clouds shared by all model variants.
    models_pcd = torch.tensor(val_dataset.models_pcd).to(DEVICE, dtype=torch.float32)
    if opt.model_name == "CNN":
        vgg16 = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1)
        model = PoseCNN(pretrained_backbone=vgg16,
                        models_pcd=models_pcd,
                        cam_intrinsic=val_dataset.cam_intrinsic,
                        using_fft=opt.using_fft,
                        using_loss_model=opt.using_loss_model).to(DEVICE)
    elif opt.model_name == "SSM":
        vgg16 = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1)
        model = PoseSSM(pretrained_backbone=vgg16,
                        models_pcd=models_pcd,
                        cam_intrinsic=val_dataset.cam_intrinsic,
                        using_fft=opt.using_fft,
                        using_loss_model=opt.using_loss_model).to(DEVICE)
    elif opt.model_name == "FFT":
        model = PoseAttention(pretrained_backbone=None,
                              models_pcd=models_pcd,
                              cam_intrinsic=val_dataset.cam_intrinsic,
                              using_loss_model=opt.using_loss_model,
                              num_res=opt.num_res,
                              using_normal=opt.using_normal,
                              using_conv=opt.using_conv,
                              LayerNorm_type=opt.LayerNorm_type,
                              rotation_mode=opt.rotation_mode).to(DEVICE)
    else:
        # Fail loudly instead of crashing later with a NameError on `model`.
        raise ValueError(f"Unknown model_name: {opt.model_name!r}")

    # Load the latest checkpoint, falling back to the first-epoch one.
    # map_location lets GPU-saved checkpoints load on CPU-only machines.
    try:
        ckpt = torch.load(f"{folder}/model_latest.pth", map_location=DEVICE)
    except (OSError, RuntimeError):
        ckpt = torch.load(f"{folder}/model_0.pth", map_location=DEVICE)
    model.load_state_dict(ckpt['model'])
    print(f"{opt.model_name} Model loaded")
    # ------------------------------------------------------------------------# print model parameters
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f'{opt.model_name} number of params:{n_parameters/1e6}M')
    print("--" * 30)
    # ------------------------------------------------------------------------# metric
    metricer = PoseEvaluatorLMO(models=val_dataset.models,
                                classes=val_dataset.classes,
                                model_info=val_dataset.models_info,
                                model_symmetry=val_dataset.models_symmetry,
                                depth_scale=1)

    # -------------------------------------------------------------------------# inference
    model.eval()  # hoisted out of the loop: switching mode once is enough
    time_start = time.time()
    print("test start !")
    preds_txt = []  # flattened [R|t] rows (12 values) for pose.txt
    preds = []      # 3x4 predicted poses, aligned with gts
    gts = []        # 3x4 ground-truth poses
    with torch.no_grad():  # evaluation only: no autograd bookkeeping needed
        for batch in tqdm(val_dataloader):
            for item in batch:
                batch[item] = batch[item].to(DEVICE)
            # Ground-truth pose as a 3x4 [R|t] matrix.
            gt_pose = batch['RTs'].detach().cpu().numpy().reshape(3, 4)
            gts.append(gt_pose)
            # Predicted pose; pose_dict[0][1] is assumed to be a 4x4 (or 3x4+)
            # transform in numpy-compatible form — TODO confirm with the model.
            pose_dict, seg = model(batch)
            try:
                preds.append(pose_dict[0][1][:3, :])
                r_pred = pose_dict[0][1][:3, :3]
                t_pred = pose_dict[0][1][:3, 3]
            except (KeyError, IndexError, TypeError):
                # No pose was produced for this frame; record zeros so the
                # prediction list stays index-aligned with the ground truth.
                preds.append(np.zeros((3, 4)))
                r_pred = np.zeros((3, 3))
                t_pred = np.zeros((3,))
            rt = np.concatenate([r_pred.reshape(-1), t_pred.reshape(-1)])
            preds_txt.append(rt)
    total_time = time.time() - time_start
    # Use the number of frames actually processed (DEBUG truncates the loader,
    # so len(val_dataset) would give misleading per-frame/fps numbers).
    n_frames = max(len(preds), 1)
    print(f"total time:{total_time} -- time per frame:{total_time/n_frames}--fps:{n_frames/total_time}")
    print("test end !")
    print("--" * 30)
    # -------------------------------------------------------------------------# get pose txt

    np.savetxt(f"{folder}/pose.txt", np.array(preds_txt).reshape(-1, 12), fmt='%.5f')
    print("pose txt done !")

    np.savetxt(f"{folder}/pose_gt.txt", np.array(gts).reshape(-1, 12), fmt='%.5f')
    print("pose_gt txt done !")
    # -------------------------------------------------------------------------# get ADD
    # The evaluator expects per-class dicts; this single-object run uses key 1.
    metricer.poses_pred = {1: preds}
    metricer.poses_gt = {1: gts}
    metricer.evaluate_pose_adds(folder)
    metricer.calculate_class_avg_rotation_error(folder)
    metricer.calculate_class_avg_translation_error(folder)
    metricer.calc_mAP_r(folder)
    metricer.calc_mAP_t(folder)
    print("ADD mAP metric done !")
