import os
import cv2
import json
import torch
import copy
import warnings
import pickle
import glob,time
import numpy as np
from tqdm import tqdm
from pathlib import Path
from omegaconf import DictConfig, OmegaConf

from utils.geometry.wrappers import Pose, Camera
from models import get_model
from utils.lightening_utils import MyLightningLogger, convert_old_model, load_model_weight
from dataset.utils import read_image, resize, numpy_image_to_torch, crop, zero_pad, get_imgaug_seq
from utils.utils import project_correspondences_line, get_closest_template_view_index,\
    get_closest_k_template_view_index, get_bbox_from_p2d,generate_random_aa_and_t
    
from models.deep_ac import calculate_basic_line_data
from utils.draw_tutorial import draw_correspondence_lines_in_image
import sys
sys.path.append(os.getcwd())
sys.path.append(r'/home/fd_chen/pose/PoseCNN')

from visualizer.util_visualizer import mkdir,mkdirs
from visualizer.vis_bbox import create_bounding_box,create_coor,set_message

from InitPose.lib.metrics.pose_evaluator_lmo import PoseEvaluatorLMO


# 3D model point cloud for obj_000003, loaded once at import time; used by the
# per-frame bounding-box sanity-check visualisation (create_bounding_box) in main().
points =np.load('/home/fd_chen/pose/PoseCNN/obj/obj_000003/models_pcd.npy')

def get_camera_intrinsic(folder=r'/home/fd_chen/pose/PoseCNN/obj_ac/obj_000001/'):#datasets/col/train/intrinsics.json
    """Load pinhole camera intrinsics from ``<folder>/intrinsics.json``.

    The JSON file is expected to hold the keys ``fx``, ``fy``, ``ppx``, ``ppy``.

    Returns:
        tuple: (raw intrinsics dict, 3x3 float64 camera matrix K).
    """
    json_path = os.path.join(folder, "intrinsics.json")
    with open(json_path, "r") as f:
        intrinsics = json.load(f)

    fx, fy = float(intrinsics["fx"]), float(intrinsics["fy"])
    cx, cy = float(intrinsics["ppx"]), float(intrinsics["ppy"])
    K = np.array([[fx, 0.0, cx],
                  [0.0, fy, cy],
                  [0.0, 0.0, 1.0]], dtype="float64")
    return (intrinsics, K)

def set_pose_gt(poses,iter,cfg):
    """Build the ground-truth Pose for frame index ``iter``.

    Each row of ``poses`` is a flattened 3x4 [R|t] matrix; the translation is
    converted from the dataset unit to meters via ``cfg.geometry_unit_in_meter``.
    """
    mat = poses[iter].reshape(3, 4)
    rotation = mat[:3, :3]
    translation = mat[:3, 3] * cfg.geometry_unit_in_meter
    return Pose.from_Rt(rotation, translation)

def set_pose(poses,iter,cfg):
    """Build the (initial) Pose for frame index ``iter``.

    Each row of ``poses`` stores 12 values: a row-major 3x3 rotation followed
    by a 3-vector translation, the latter scaled to meters here.
    """
    row = poses[iter]
    rotation = row[:9].reshape(3, 3)
    translation = row[9:] * cfg.geometry_unit_in_meter
    return Pose.from_Rt(rotation, translation)

def set_pose_box(poses,iter,cfg):
    """Build a plain 3x4 [R|t] numpy matrix for frame ``iter`` (bbox drawing).

    ``poses[iter]`` stores 12 values: a row-major 3x3 rotation followed by a
    3-vector translation in the dataset unit.

    Fix: the original scaled the translation by ``cfg.geometry_unit_in_meter``
    TWICE (once when reading it, once again when writing it into the matrix),
    so the box pose disagreed with set_pose/set_pose_gt by a factor of
    geometry_unit_in_meter. The scale is now applied exactly once.
    """
    row = poses[iter]
    pose_box = np.zeros((3, 4))
    pose_box[:3, :3] = row[:9].reshape(3, 3)
    pose_box[:3, 3] = row[9:] * cfg.geometry_unit_in_meter
    return pose_box


def get_pose_from_class(pose:Pose):
    """Flatten a Pose wrapper into a plain 3x4 [R|t] torch tensor.

    ``pose._data`` holds 12 values: a row-major 3x3 rotation followed by a
    3-vector translation.
    """
    flat = pose._data
    mat = torch.zeros(3, 4)
    mat[:3, :3] = flat[:9].reshape(3, 3)
    mat[:3, 3] = flat[9:]
    return mat

@torch.no_grad()
def main(cfg):
    """Track an object's 6D pose through an image sequence with DeepAC and evaluate it.

    Per frame: read image -> select the closest pre-rendered template views for
    the current pose estimate -> crop/resize around the projected contour ->
    run the model -> dump visualisations -> update the colour histograms with
    the refined pose. Finally ADD / rotation / translation / mAP metrics are
    written under ``<log_dir>/metric/``.

    Expected cfg keys (at least): gpu_id, save_dir, load_cfg, load_model,
    fore_learn_rate, back_learn_rate, obj_name, data_dir,
    geometry_unit_in_meter, output_video, output_size.

    Fix vs. original: ``video.release()`` was called unconditionally although
    ``video`` is only created when ``cfg.output_video`` is true, crashing any
    run with video output disabled; the release is now guarded.
    """

    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.gpu_id

    #----------------------------------------------------------------# evaluation dataset + metric helper
    from InitPose.lib.dataset.PROPSPoseDataset import PROPSPoseDataset

    val_dataset = PROPSPoseDataset("/home/fd_chen/pose/PoseCNN", "test",1)
    metricer = PoseEvaluatorLMO(models=val_dataset.models, \
                            classes=val_dataset.classes,\
                            model_info=val_dataset.models_info, \
                            model_symmetry=val_dataset.models_symmetry,\
                            depth_scale=1)

    #----------------------------------------------------------------# logger
    logger = MyLightningLogger('DeepAC', cfg.save_dir)
    logger.dump_cfg(cfg, 'demo_cfg.yml')
    assert ('load_cfg' in cfg)
    assert (Path(cfg.load_cfg).exists())

    train_cfg = OmegaConf.load(cfg.load_cfg)
    data_conf = train_cfg.data
    logger.dump_cfg(train_cfg, 'train_cfg.yml')
    #-------------------------------------------------------------------# model
    model = get_model(train_cfg.models.name)(train_cfg.models)
    ckpt = torch.load(cfg.load_model, map_location='cpu')
    if "pytorch-lightning_version" not in ckpt:
        warnings.warn(
            "Warning! Old .pth checkpoint is deprecated. "
            "Convert the checkpoint with tools/convert_old_checkpoint.py "
        )
        ckpt = convert_old_model(ckpt)
    load_model_weight(model, ckpt, logger)
    logger.info("Loaded model weight from {}".format(cfg.load_model))
    model.cuda()
    model.eval()
    #-------------------------------------------------------------------# config
    fore_learn_rate = cfg.fore_learn_rate
    back_learn_rate = cfg.back_learn_rate

    obj_name = cfg.obj_name
    data_dir = cfg.data_dir
    img_dir = os.path.join(data_dir, 'img')
    pose_path = os.path.join(img_dir, 'pose.txt')
    gt_pose_path =os.path.join(img_dir, 'pose_gt.txt')
    K_path = os.path.join(data_dir, 'K.txt')
    template_path = os.path.join(data_dir, obj_name, 'pre_render', f'{obj_name}.pkl')
    #-------------------------------------------------------------------# pre-rendered template views
    with open(template_path, "rb") as pkl_handle:
        pre_render_dict = pickle.load(pkl_handle)
    head = pre_render_dict['head']
    num_sample_contour_points = head['num_sample_contour_point']
    template_views = torch.from_numpy(pre_render_dict['template_view']).type(torch.float32)
    orientations = torch.from_numpy(pre_render_dict['orientation_in_body']).type(torch.float32)
    #-------------------------------------------------------------------# data: intrinsics, poses, image list
    K = torch.from_numpy(np.loadtxt(K_path)).type(torch.float32)
    poses = torch.from_numpy(np.loadtxt(pose_path)).type(torch.float32)
    gt_poses = torch.from_numpy(np.loadtxt(gt_pose_path)).type(torch.float32)
    assert len(poses) == len(gt_poses)
    # images are assumed to be named 0.jpg, 1.jpg, ... without gaps
    img_lists =[f'{img_dir}/{i}.jpg' for i in range(len(glob.glob(f'{img_dir}/*.jpg')))]

    def preprocess_image(img, bbox2d, camera):
        """Crop around bbox2d (padded by crop_border), resize per data_conf and
        zero-pad; returns the image as a torch tensor and the adjusted camera."""
        bbox2d[2:] += data_conf.crop_border * 2
        img, camera, bbox = crop(img, bbox2d, camera=camera, return_bbox=True)

        scales = (1, 1)
        if isinstance(data_conf.resize, int):
            if data_conf.resize_by == 'max':
                img, scales = resize(img, data_conf.resize, fn=max)
            elif (data_conf.resize_by == 'min' or (data_conf.resize_by == 'min_if' and min(*img.shape[:2]) < data_conf.resize)):
                img, scales = resize(img, data_conf.resize, fn=min)
        elif len(data_conf.resize) == 2:
            img, scales = resize(img, list(data_conf.resize))
        if scales != (1, 1):
            camera = camera.scale(scales)

        img, = zero_pad(data_conf.pad, img)
        img = img.astype(np.float32)
        return numpy_image_to_torch(img), camera

    if cfg.output_video:
        video = cv2.VideoWriter(os.path.join(logger.log_dir, obj_name + ".avi"),
                                cv2.VideoWriter_fourcc('M', 'P', '4', '2'), 30, cfg.output_size)

    # output directories for the various visualisations
    check_box_dir=os.path.join(logger.log_dir, 'check_box/')
    init_dir=os.path.join(logger.log_dir, 'init/')
    gt_dir=os.path.join(logger.log_dir, 'gt/')
    match_demo_dir=os.path.join(logger.log_dir, 'match_demo/')
    result_dir=os.path.join(logger.log_dir, 'result/')
    feature_match_dir = os.path.join(logger.log_dir, 'feature_match/')
    feature_pca_dir = os.path.join(logger.log_dir, 'feature_pca/')
    lines_dir = os.path.join(logger.log_dir, 'lines/')

    mkdir(check_box_dir)
    mkdir(init_dir)
    mkdir(gt_dir)
    mkdir(match_demo_dir)
    mkdir(result_dir)
    mkdir(feature_match_dir)
    mkdir(feature_pca_dir)
    mkdir(lines_dir)

    preds=[]
    gts = []
    time_start = 0  # accumulated model forward time (seconds)
    for i, img_path in enumerate(tqdm(img_lists)):

        init_pose = set_pose(poses,i,cfg)
        gt_pose = set_pose_gt(gt_poses,i,cfg)
        ori_image = read_image(img_path)
        #-----------------------------------------------------------------------------------------# camera
        height, width = ori_image.shape[:2]
        # K is the flattened 3x3 read from K.txt: fx=K[0], fy=K[4], cx=K[2], cy=K[5]
        intrinsic_param = torch.tensor([width, height, K[0], K[4], K[2], K[5]], dtype=torch.float32)
        ori_camera = Camera(intrinsic_param)
        #-----------------------------------------------------------------------------------------# vis bbox to check
        if True:# vis bbox
            cam_intrinsic = get_camera_intrinsic()[1]
            pose_vis=set_pose_box(poses,i,cfg)
            box_img=create_bounding_box(cv2.imread(img_path), pose_vis, points[0], cam_intrinsic, color=(0, 255, 0))
            cv2.imwrite(os.path.join(check_box_dir,f'{i}.png'), box_img)

        #-----------------------------------------------------------------------------------------# get template view
        indices = get_closest_k_template_view_index(init_pose, orientations,
                                                    data_conf.get_top_k_template_views * data_conf.skip_template_view)
        closest_template_views = torch.stack([template_views[ind * num_sample_contour_points:(ind + 1) * num_sample_contour_points, :]
                                                for ind in indices[::data_conf.skip_template_view]])
        closest_orientations_in_body = orientations[indices[::data_conf.skip_template_view]]
        data_lines = project_correspondences_line(closest_template_views[0], init_pose, ori_camera)
        bbox2d = get_bbox_from_p2d(data_lines['centers_in_image'])
        img, camera = preprocess_image(ori_image, bbox2d.numpy().copy(), ori_camera)
        #-----------------------------------------------------------------------------------------# vis lines to check
        if True:
            data_lines = project_correspondences_line(closest_template_views[0], init_pose, ori_camera)
            # NOTE(review): assumes read_image returns floats in [0, 1] — confirm
            display_image = draw_correspondence_lines_in_image((ori_image* 255).astype(np.uint8),
                                                               data_lines['centers_in_image'],
                                                               data_lines['centers_valid'],
                                                               data_lines['normals_in_image'], 10)
            cv2.imwrite(os.path.join(match_demo_dir,f'{i}.jpg'), display_image)
        #-----------------------------------------------------------------------------------------# histogram init (first frame only)
        if i == 0:
            _, _, centers_in_image, centers_valid, normals_in_image, foreground_distance, background_distance, _ =\
                calculate_basic_line_data(closest_template_views[None][:, 0], init_pose[None]._data, camera[None]._data, 1, 0)
            total_fore_hist, total_back_hist = \
                model.histogram.calculate_histogram(img[None], centers_in_image, centers_valid, normals_in_image,
                                                    foreground_distance, background_distance, True)

        data = {
            'image': img[None].cuda(),
            'camera': camera[None].cuda(),
            'body2view_pose': init_pose[None].cuda(),
            'closest_template_views': closest_template_views[None].cuda(),
            'closest_orientations_in_body': closest_orientations_in_body[None].cuda(),
            'fore_hist': total_fore_hist.cuda(),
            'back_hist': total_back_hist.cuda()
        }
        time1 =time.time()
        pred= model._forward(data, visualize=False, tracking=True)
        delta_time =time.time()-time1
        time_start +=delta_time
        # pred,pca_feature = model._forward(data, visualize=False, tracking=True)
        #-----------------------------------------------------------------------------------------# vis feature to check
        # NOTE(review): disabled debug branch; it needs `pca_feature` from the
        # alternate _forward call commented out above — enable both together.
        if False:
            # create a transparent image
            background_lines= np.zeros((320, 320, 3), dtype=np.uint8)
            # add an alpha channel
            background_lines = cv2.cvtColor(background_lines, cv2.COLOR_BGR2BGRA)

            cv2.imwrite(os.path.join(feature_pca_dir,f'{i}.jpg'), pca_feature)
            random_aa, random_t = generate_random_aa_and_t(5., 25.,
                                                           0.005, 0.025)
            random_pose = Pose.from_aa(random_aa, random_t)
            gt_body2view_pose=init_pose
            body2view_pose = gt_body2view_pose @ random_pose[0]
            data_lines = project_correspondences_line(closest_template_views[0], body2view_pose, camera)
            feature_match_image = draw_correspondence_lines_in_image((pca_feature).astype(np.uint8),
                                                        data_lines['centers_in_image'],
                                                        data_lines['centers_valid'],
                                                        data_lines['normals_in_image'], 10)
            feature_match_image_lines = draw_correspondence_lines_in_image(background_lines,
                                                        data_lines['centers_in_image'],
                                                        data_lines['centers_valid'],
                                                        data_lines['normals_in_image'], 10)
            cv2.imwrite(os.path.join(feature_match_dir,f'{i}.jpg'), feature_match_image)
            cv2.imwrite(os.path.join(lines_dir,f'{i}.jpg'), feature_match_image_lines)

        #-----------------------------------------------------------------------------------------# vis result to video
        if cfg.output_video:
            pred['optimizing_result_imgs'] = []
            model.visualize_optimization(pred['opt_body2view_pose'][-1], pred)
            video.write(cv2.resize(pred['optimizing_result_imgs'][0][0], cfg.output_size))
            cv2.imwrite(os.path.join(result_dir, f'{i:04d}.jpg'), pred['optimizing_result_imgs'][0][0])
        #-----------------------------------------------------------------------------------------# vis refined pose
        if True:
            pred['optimizing_result_imgs'] = []
            model.visualize_optimization(pred['opt_body2view_pose'][-1][0].cuda(), pred) # visualize the refined pose (contour points)
            cv2.imwrite(os.path.join(result_dir, f'{i:04d}.jpg'), pred['optimizing_result_imgs'][0][0])

        if True:
            pred['optimizing_result_imgs'] = []
            model.visualize_optimization(init_pose.cuda(), pred) # visualize the initial pose (contour points)
            cv2.imwrite(os.path.join(init_dir, f'{i:04d}.jpg'), pred['optimizing_result_imgs'][0][0])

        if True:
            pred['optimizing_result_imgs'] = []
            model.visualize_optimization(gt_pose.cuda(), pred) # visualize the ground-truth pose (contour points)
            cv2.imwrite(os.path.join(gt_dir, f'{i:04d}.jpg'), pred['optimizing_result_imgs'][0][0])
        #-----------------------------------------------------------------------------------------# collect metric inputs
        pred_pose = get_pose_from_class(pred['opt_body2view_pose'][-1][0].cpu())
        gt_pose =get_pose_from_class(gt_pose.cpu())

        preds.append(pred_pose.numpy())
        gts.append(gt_pose.numpy())

        #-----------------------------------------------------------------------------------------# update histograms with refined pose
        init_pose = pred['opt_body2view_pose'][-1][0].cpu() # carry refined pose into histogram update
        index = get_closest_template_view_index(init_pose, orientations)
        closest_template_view = template_views[index*num_sample_contour_points:(index+1)*num_sample_contour_points, :]
        _, _, centers_in_image, centers_valid, normals_in_image, foreground_distance, background_distance, _ =\
            calculate_basic_line_data(closest_template_view[None], init_pose[None]._data, camera[None]._data, 1, 0)
        fore_hist, back_hist = \
            model.histogram.calculate_histogram(img[None], centers_in_image, centers_valid, normals_in_image,
                                                foreground_distance, background_distance, True)
        # exponential moving average of foreground/background colour histograms
        total_fore_hist = (1 - fore_learn_rate) * total_fore_hist + fore_learn_rate * fore_hist
        total_back_hist = (1 - back_learn_rate) * total_back_hist + back_learn_rate * back_hist

    # FIX: only release the writer if it was created (cfg.output_video)
    if cfg.output_video:
        video.release()
    folder =logger.log_dir+ '/metric/'
    mkdir(folder)
    metricer.poses_pred={1:preds}
    metricer.poses_gt={1:gts}
    metricer.evaluate_pose_adds(folder)
    metricer.calculate_class_avg_rotation_error(folder)
    metricer.calculate_class_avg_translation_error(folder)
    metricer.calc_mAP_r(folder)
    metricer.calc_mAP_t(folder)

    print("ADD mAP metric done !")
    print("time cost:",time_start/len(img_lists))