import cv2
import time
import random
import os,sys
import numpy as np
from PIL import Image
from tqdm import tqdm
sys.path.append(os.getcwd())

import torch
from torch.utils.data import DataLoader
import torchvision.models as models
from torchvision.utils import save_image, make_grid
from InitPose.models.pose_cnn_ssm import PoseSSM
from InitPose.models.PoseAttentionNet import PoseAttention
from InitPose.models.pose_cnn_fft import PoseCNN 
from InitPose.lib.dataset.PROPSPoseDataset import PROPSPoseDataset
from InitPose.lib.options.config import Config


from visualizer.base_visualizer import BaseVisualizer as Visualizer
from visualizer.util_visualizer import mkdir
from visualizer.vis_bbox import create_bounding_box,create_coor,set_message

# eval: runs model evaluation — checkpoint loading, data loading, inference, and
# result visualization. Objective metric computation is NOT included here!

# os.environ['CUDA_VISIBLE_DEVICES'] = '2'

if __name__ == "__main__":
    #-----------------------------------------------------------------------------#PARAMS
    # Checkpoint directory to evaluate. Other trained runs (obj000001/2/3,
    # CNN/SSM variants) live under InitPose/checkpoints/Initpose/<obj_id>/<timestamp>/.
    folder ='InitPose/checkpoints/Initpose/obj000003/2024-05-03-21-30-37' # 003-ssm
    opt = Config(config_file=f"{folder}/config.json").get_config()

    #------------------------------------------------------------------------------#DATA
    val_dataset = PROPSPoseDataset("./", "test",opt.obj_id)
    val_dataloader   = DataLoader(dataset=val_dataset, batch_size=1,num_workers=opt.num_workers)
    print(f"Dataset sizes: val ({len(val_dataset)})")
    #-----------------------------------------------------------------------------#Device
    # Fall back to CPU instead of leaving DEVICE undefined (the original raised
    # NameError on CPU-only machines).
    if torch.cuda.is_available():
        print("using GPU!")
        DEVICE = torch.device("cuda:0")
    else:
        print("CUDA not available, using CPU!")
        DEVICE = torch.device("cpu")
    #------------------------------------------------------------------------------#choose MODEL
    # Mutually exclusive model selection; an unknown name now fails fast with a
    # clear error instead of an unrelated NameError later on.
    if opt.model_name=="CNN":
        vgg16 = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1)
        model = PoseCNN(
                        pretrained_backbone=vgg16,
                        models_pcd=torch.tensor(val_dataset.models_pcd).to(DEVICE, dtype=torch.float32),
                        cam_intrinsic=val_dataset.cam_intrinsic,
                        using_fft=opt.using_fft,
                        using_loss_model=opt.using_loss_model).to(DEVICE)
    elif opt.model_name=="SSM":
        vgg16 = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1)
        model = PoseSSM(
                        pretrained_backbone=vgg16,
                        models_pcd=torch.tensor(val_dataset.models_pcd).to(DEVICE, dtype=torch.float32),
                        cam_intrinsic=val_dataset.cam_intrinsic,
                        using_fft=opt.using_fft,
                        using_loss_model=opt.using_loss_model).to(DEVICE)
    elif opt.model_name=="FFT":
        model =PoseAttention(pretrained_backbone=None,
                            models_pcd=torch.tensor(val_dataset.models_pcd).to(DEVICE, dtype=torch.float32),
                            cam_intrinsic=val_dataset.cam_intrinsic,
                            using_loss_model=opt.using_loss_model,
                            num_res=opt.num_res,
                            using_normal=opt.using_normal,
                            using_conv=opt.using_conv,
                            LayerNorm_type=opt.LayerNorm_type,
                            rotation_mode=opt.rotation_mode).to(DEVICE)
    else:
        raise ValueError(f"Unknown model_name: {opt.model_name!r} (expected CNN, SSM or FFT)")

    # Prefer the latest checkpoint, fall back to the initial one. Catch only the
    # missing-file case (the original bare `except:` hid real load errors and the
    # `finally` claimed success even on failure). map_location lets CUDA-saved
    # checkpoints load on CPU-only hosts.
    try:
        checkpoint = torch.load(f"{folder}/model_latest.pth", map_location=DEVICE)
    except FileNotFoundError:
        checkpoint = torch.load(f"{folder}/model_0.pth", map_location=DEVICE)
    model.load_state_dict(checkpoint['model'])
    print(f"{opt.model_name} Model loaded")

    model.eval()
    print("Model loaded !")

    #------------------------------------------------------------------------------#total PARAMS
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)

    #-------------------------------------------------------------------------------# data
    # Single fixed sample for qualitative inspection; randomize or loop over
    # range(len(val_dataset)) for a full sweep.
    # sample_idx = random.randint(0,len(val_dataloader.dataset)-1)
    sample_idx = 58
    rgb = torch.tensor(val_dataloader.dataset[sample_idx]['rgb'][None, :]).to(DEVICE)
    inputdict = {'rgb': rgb}

    mkdir(f"{folder}/val/{sample_idx}")
    rgb_grid = make_grid(rgb)
    save_image(rgb_grid, os.path.join(folder,'val',f"{sample_idx}",f'rgb_{sample_idx}.png'))
    print("RGB image saved !")
    #------------------------------------------------------------------------------# eval
    # Inference only: disable autograd to avoid building a graph and save memory.
    start_time= time.time()
    with torch.no_grad():
        pose_dict, seg = model(inputdict)
    using_time = time.time()-start_time
    #------------------------------------------------------------------------------# vis mask
    # Cast to uint8 so the written PNG has well-defined pixel values
    # (assumes seg is a probability/binary map in [0, 1] — TODO confirm).
    segmap=np.squeeze(seg.cpu().detach().numpy())
    cv2.imwrite(os.path.join(folder,"val",f"{sample_idx}",f"mask_{sample_idx}.png"),(segmap*255).astype(np.uint8))
    #------------------------------------------------------------------------------# vis pose
    if pose_dict:
        #------------------------------------------------------------------------------------# data
        data=val_dataloader.dataset[sample_idx]
        img = data['rgb']
        pt_cld_data= val_dataset.models_pcd[0]
        intrinsic_matrix = val_dataset.cam_intrinsic
        #------------------------------------------------------------------------------------# vis pose image
        # CHW float image -> HWC uint8 for PIL/OpenCV drawing.
        image_data_transposed = img.transpose((1, 2, 0))
        image_rgb =Image.fromarray((image_data_transposed * 255).astype(np.uint8))
        image_rgb.save(os.path.join(folder,'val',f"{sample_idx}",f'pose_{sample_idx}.png')) # to check
        image_rgb = np.array(image_rgb)
        image_rgb_bbox = np.array(image_rgb)
        image_rgb_coor = np.array(image_rgb)
        real_poses =data['RTs'][0]
        pred_poses = pose_dict[0][1]
        pred_poses = pred_poses[:3,:]
        #-------------------------------------------------------------------------------# vis pose bbox
        # Both calls draw onto image_rgb_bbox, so GT (green) and prediction (red)
        # end up overlaid on the same output image.
        box_imge =create_bounding_box(image_rgb_bbox, real_poses, pt_cld_data, intrinsic_matrix,color=(0,255,0))# green gt
        box_imge = create_bounding_box(image_rgb_bbox, pred_poses, pt_cld_data, intrinsic_matrix,color= (255,0,0))#red pred
        # set_message(box_imge, (100, 50), f'using time: {using_time:.2f} sec')
        cv2.imwrite(os.path.join(folder,"val",f"{sample_idx}",f"pose_bbox_{sample_idx}.png"),box_imge)

        #-------------------------------------------------------------------------------# vis pose coordinate
        # Same overlay trick: GT and predicted coordinate axes on one image.
        coor_imge = create_coor(image_rgb_coor, real_poses, intrinsic_matrix,message='GT')
        coor_imge = create_coor(image_rgb_coor, pred_poses, intrinsic_matrix,message='Pred')
        cv2.imwrite(os.path.join(folder,"val",f"{sample_idx}",f"pose_coor_{sample_idx}.png"),coor_imge)

        print("Pose visualization saved")

    else:
        print("No object detected")