import re
import sys
sys.path.append('./')
sys.path.append('./pose_estimator')
import _pickle as cPickle
import argparse
import torch
import cv2
import os
import numpy as np
import torch.nn.functional as F
import torchvision.transforms as transforms
from pose_estimator.lib.network import DeformNet
from pose_estimator.lib.align import estimateSimilarityTransform
from pose_estimator.lib.utils import load_depth, get_bbox, draw_detections
from tqdm import tqdm
import time
import open3d as o3d
import copy



def get_args():
    """Parse command-line options and pin the visible GPU.

    Side effect: sets CUDA_VISIBLE_DEVICES from --gpu before returning.

    Returns:
        argparse.Namespace with all options below.
    """
    p = argparse.ArgumentParser()
    p.add_argument('--data_dir', type=str, default='rs_imgs', help='data directory')
    p.add_argument('--img_num', type=int, default=17)
    p.add_argument('--n_cat', type=int, default=6, help='number of object categories')
    p.add_argument('--nv_prior', type=int, default=1024, help='number of vertices in shape priors')
    p.add_argument('--checkpoint_pose', type=str, default='pose_estimator/checkpoints/real/model_50.pth', help='resume from saved model')
    p.add_argument('--n_pts', type=int, default=1024, help='number of foreground points')
    p.add_argument('--img_size', type=int, default=192, help='cropped image size')
    p.add_argument('--gpu', type=str, default='0', help='GPU to use')
    p.add_argument("--save_pose", action='store_true', default=False)
    p.add_argument("--show_pose", action='store_true', default=False)
    parsed = p.parse_args()
    # Restrict CUDA to the requested device before any model is built.
    os.environ['CUDA_VISIBLE_DEVICES'] = parsed.gpu
    return parsed

def load_model_pose(opt):
    """Build a DeformNet on the GPU, restore weights, and switch to eval mode.

    Args:
        opt: namespace providing n_cat, nv_prior and checkpoint_pose.

    Returns:
        The ready-to-run estimator network.
    """
    net = DeformNet(opt.n_cat, opt.nv_prior)
    net.cuda()
    state = torch.load(opt.checkpoint_pose)
    net.load_state_dict(state)
    net.eval()
    return net

def get_pose(raw_rgb, raw_depth, mask_result, mean_shapes, icp_flag=True, model=None, args=None):
    """Estimate 6D pose and size for every detected instance in one frame.

    Args:
        raw_rgb: HxWx3 RGB image (numpy array).
        raw_depth: HxW depth map; 0 means no depth observation.
        mask_result: dict with 'class_ids', 'rois', 'masks', 'scores' from the
            segmentation stage. Mutated in place: filtered down to the
            instances with enough valid depth points.
        mean_shapes: per-category shape priors, indexed by class id.
        icp_flag: when True, also collect per-instance camera-frame point
            clouds (intended for the ICP refinement, currently commented out).
        model: a pre-loaded DeformNet; loaded from args.checkpoint_pose when None.
        args: options namespace; must also carry intrinsics (3x3 numpy array),
            xmap, ymap, norm_scale, norm_color, n_pts, img_size, show_pose.

    Returns:
        dict with 2D detections ('pred_class_ids', 'pred_bboxes',
        'pred_scores') and 3D results ('pred_sRT', 'pred_s', 'pred_R',
        'pred_t', 'pred_scales', 'obj_pcs', 'cam_pcs'), or None when no
        instance has at least 32 valid depth points.
    """
    # NOTE(review): assumes args.intrinsics is a numpy 3x3 matrix (tuple indexing).
    cam_fx, cam_fy = args.intrinsics[0, 0], args.intrinsics[1, 1]
    cam_cx, cam_cy = args.intrinsics[0, 2], args.intrinsics[1, 2]
    xmap = args.xmap
    ymap = args.ymap
    norm_scale = args.norm_scale
    norm_color = args.norm_color

    # Promote a single scalar detection to batched 1-element arrays.
    if type(mask_result['class_ids']) is int:
        for k, v in mask_result.items():
            mask_result[k] = np.expand_dims(np.array(v), axis=0)

    if model is None:
        estimator = load_model_pose(args)
    else:
        estimator = model

    # prepare frame data
    f_points, f_rgb, f_choose, f_catId, f_prior = [], [], [], [], []

    # BUG FIX: result['cam_pcs'] below reads f_points_icp unconditionally, so
    # it must exist even when icp_flag is False (previously a NameError).
    f_points_icp = []
    if icp_flag:
        # Per-category ICP correspondence thresholds (for the commented-out
        # refinement further down).
        icp_thres = [0.005, 0.001, 0.03, 0.005, 0.005, 0.01]

    valid_inst = []
    num_insts = len(mask_result['class_ids'])
    for i in range(num_insts):
        cat_id = mask_result['class_ids'][i]
        prior = mean_shapes[cat_id]
        rmin, rmax, cmin, cmax = get_bbox(mask_result['rois'][i])
        rmin, rmax, cmin, cmax = int(rmin), int(rmax), int(cmin), int(cmax)
        # Keep only mask pixels that also have a depth reading.
        mask = np.logical_and(mask_result['masks'][i, :, :], raw_depth > 0)
        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]

        # Too few foreground depth points -> skip this instance entirely.
        if len(choose) < 32:
            continue
        else:
            valid_inst.append(i)

        ########## 2021.12.13 YJH ADD ICP ############
        if icp_flag:
            # Back-project every valid mask pixel to a camera-frame cloud,
            # clipping outliers to +-0.3 around the per-axis mean.
            depth_icp = raw_depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
            xmap_icp = xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
            ymap_icp = ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]

            pt2_icp = depth_icp / norm_scale
            pt2_icp = np.clip(pt2_icp, np.mean(pt2_icp) - 0.3, np.mean(pt2_icp) + 0.3)
            pt0_icp = (xmap_icp - cam_cx) * pt2_icp / cam_fx
            pt0_icp = np.clip(pt0_icp, np.mean(pt0_icp) - 0.3, np.mean(pt0_icp) + 0.3)
            pt1_icp = (ymap_icp - cam_cy) * pt2_icp / cam_fy
            pt1_icp = np.clip(pt1_icp, np.mean(pt1_icp) - 0.3, np.mean(pt1_icp) + 0.3)

            points_icp = np.concatenate((pt0_icp, pt1_icp, pt2_icp), axis=1)

            f_points_icp.append(points_icp)
        ##############################################

        # process objects with valid depth observation:
        # down-sample to exactly n_pts indices (or pad by wrapping).
        if len(choose) > args.n_pts:
            c_mask = np.zeros(len(choose), dtype=int)
            c_mask[:args.n_pts] = 1
            np.random.shuffle(c_mask)
            choose = choose[c_mask.nonzero()]
        else:
            choose = np.pad(choose, (0, args.n_pts - len(choose)), 'wrap')
        # Back-project the sampled pixels into metric camera coordinates.
        depth_masked = raw_depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
        xmap_masked = xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
        ymap_masked = ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
        pt2 = depth_masked / norm_scale
        pt0 = (xmap_masked - cam_cx) * pt2 / cam_fx
        pt1 = (ymap_masked - cam_cy) * pt2 / cam_fy
        points = np.concatenate((pt0, pt1, pt2), axis=1)
        rgb = raw_rgb[rmin:rmax, cmin:cmax, :]
        rgb = cv2.resize(rgb, (args.img_size, args.img_size), interpolation=cv2.INTER_LINEAR)
        rgb = norm_color(rgb)
        # Re-index 'choose' into the resized crop's pixel grid.
        crop_w = rmax - rmin
        ratio = args.img_size / crop_w
        col_idx = choose % crop_w
        row_idx = choose // crop_w
        choose = (np.floor(row_idx * ratio) * args.img_size + np.floor(col_idx * ratio)).astype(np.int64)
        # concatenate instances
        f_points.append(points)
        f_rgb.append(rgb)
        f_choose.append(choose)
        f_catId.append(cat_id)
        f_prior.append(prior)

    num_valid_inst = len(valid_inst)
    if num_valid_inst == 0:
        return None
    # Drop invalid instances from the (mutated) detection dict.
    valid_inst_np = np.array(valid_inst)
    mask_result['class_ids'] = mask_result['class_ids'][valid_inst_np]
    mask_result['rois'] = mask_result['rois'][valid_inst_np]
    mask_result['scores'] = mask_result['scores'][valid_inst_np]
    f_sRT = np.zeros((num_valid_inst, 4, 4), dtype=float)
    f_s = np.zeros((num_valid_inst), dtype=float)
    f_R = np.zeros((num_valid_inst, 3, 3), dtype=float)
    f_t = np.zeros((num_valid_inst, 3), dtype=float)
    f_size = np.zeros((num_valid_inst, 3), dtype=float)

    if len(valid_inst):
        f_points = torch.cuda.FloatTensor(f_points)
        f_rgb = torch.stack(f_rgb, dim=0).cuda()
        f_choose = torch.cuda.LongTensor(f_choose)
        f_catId = torch.cuda.LongTensor(f_catId)
        f_prior = torch.cuda.FloatTensor(f_prior)
        # inference: deform the prior, then soft-assign observed points to it
        assign_mat, deltas = estimator(f_points, f_rgb, f_choose, f_catId, f_prior)
        inst_shape = f_prior + deltas
        assign_mat = F.softmax(assign_mat, dim=2)
        f_coords = torch.bmm(assign_mat, inst_shape)  # bs x n_pts x 3

        f_coords = f_coords.detach().cpu().numpy()
        f_points = f_points.cpu().numpy()
        f_choose = f_choose.cpu().numpy()
        f_insts = inst_shape.detach().cpu().numpy()

        # i: inverse transformation (camera frame -> object frame)
        cuda_pred_i_sR = torch.zeros([num_valid_inst, 3, 3], dtype=torch.float32).cuda()
        cuda_pred_t = torch.zeros([num_valid_inst, 1, 3], dtype=torch.float32).cuda()

        for i in range(num_valid_inst):
            # Deduplicate padded/repeated indices before Umeyama alignment.
            choose = f_choose[i]
            _, choose = np.unique(choose, return_index=True)
            nocs_coords = f_coords[i, choose, :]
            f_size[i] = 2 * np.amax(np.abs(f_insts[i]), axis=0)
            points = f_points[i, choose, :]
            _, _, _, pred_sRT = estimateSimilarityTransform(nocs_coords, points)
            if pred_sRT is None:
                pred_sRT = np.identity(4, dtype=float)
            f_sRT[i] = pred_sRT

            # if icp_flag:
            #     pc_cam = o3d.geometry.PointCloud()
            #     pc_cam.points = o3d.utility.Vector3dVector(f_points_icp[i])
            #     pc_obj = o3d.geometry.PointCloud()
            #     pc_obj.points = o3d.utility.Vector3dVector(f_insts[i])
            #     sRT = copy.deepcopy(f_sRT[i])

            #     trans_method = o3d.pipelines.registration.TransformationEstimationPointToPoint()
            #     trans_method.with_scaling = False
            #     reg_p2p = o3d.pipelines.registration.registration_icp(
            #             pc_obj, pc_cam, icp_thres[mask_result['class_ids'][i]], sRT,
            #             trans_method,
            #             o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=100))
            #     f_sRT[i] = reg_p2p.transformation

            # Factor sRT into scale s (cube root of det) and rotation R;
            # for row-vector points, (p - t) @ (R / s) maps back to NOCS.
            s = np.cbrt(np.linalg.det(f_sRT[i][:3, :3]))
            R = f_sRT[i][:3, :3] / s
            i_sR = R / s

            f_s[i] = s
            f_R[i] = R
            f_t[i] = f_sRT[i][:3, 3]

            cuda_pred_i_sR[i] = torch.from_numpy(i_sR).float().cuda()
            cuda_pred_t[i] = torch.from_numpy(f_t[i]).view(1, 3).float().cuda()

        # Transform every observed cloud into its object (NOCS) frame at once.
        cuda_obj_pcs = torch.bmm(torch.add(torch.cuda.FloatTensor(f_points), -cuda_pred_t), cuda_pred_i_sR)

        # filter point cloud in OBJCOORD
        obj_pcs = np.zeros_like(f_points)
        for i in range(cuda_obj_pcs.shape[0]):
            obj_pc = cuda_obj_pcs[i].detach().cpu().numpy()
            assert obj_pc.shape[1] == 3
            # Discard outliers farther than 0.35 from the object centre.
            norm_pt = np.linalg.norm(obj_pc, axis=1)
            index = np.where(norm_pt < 0.35)
            obj_pc = obj_pc[index]
            if len(obj_pc) >= args.n_pts:
                sample_index = np.random.choice(obj_pc.shape[0], args.n_pts, replace=False)
            else:
                sample_index = np.random.choice(obj_pc.shape[0], args.n_pts, replace=True)
            # BUG FIX: resample in BOTH branches. Previously the >= branch
            # computed sample_index but never applied it, so any cloud with
            # more than n_pts surviving points raised a shape mismatch on the
            # assignment below.
            obj_pc = obj_pc[sample_index, :]
            obj_pcs[i] = obj_pc

    result = {}
    # 2D
    result['pred_class_ids'] = mask_result['class_ids']
    result['pred_bboxes'] = mask_result['rois']
    result['pred_scores'] = mask_result['scores']
    # 3D
    result['pred_sRT'] = f_sRT
    result['pred_s'] = f_s
    result['pred_R'] = f_R
    result['pred_t'] = f_t
    result['pred_scales'] = f_size
    result['obj_pcs'] = obj_pcs
    result['cam_pcs'] = f_points_icp  # type: list; point clouds in CAMERA COORDINATE for ICP

    if args.show_pose:
        show_pose(raw_rgb, result, args)
    return result


def detect(args):
    """Batch pose estimation over the numbered frames in opt.data_dir.

    For each frame '0000'..'NNNN' loads the color image, depth map and the
    saved mask-rcnn result ('*_mask.pkl'), runs DeformNet + Umeyama alignment,
    and optionally saves a '*_pose.pkl' per frame. Prints timing statistics
    at the end.

    NOTE(review): relies on module-level globals created in __main__
    (opt, mean_shapes, xmap, ymap, norm_scale, norm_color,
    cam_fx/cam_fy/cam_cx/cam_cy) — only runnable as a script.
    """
    # resume model (shared helper keeps this consistent with get_pose)
    estimator = load_model_pose(opt)
    # frame by frame test
    t_inference = 0.0
    t_umeyama = 0.0
    inst_count = 0
    img_count = 0

    img_list = []
    for i in range(opt.img_num):
        img_list.append(str(i).zfill(4))

    print(img_list)
    # BUG FIX: start the total-time clock once, before the loop. Previously
    # t_start was reset every iteration, so the final "Total time" message
    # only covered the last frame.
    t_start = time.time()
    for path in tqdm(img_list):
        img_path = os.path.join(opt.data_dir, path)
        raw_rgb = cv2.imread(img_path + '_color.png')[:, :, :3]
        raw_rgb = raw_rgb[:, :, ::-1]  # BGR -> RGB
        raw_depth = load_depth(img_path)

        # load mask-rcnn detection results
        img_path_parsing = img_path.split('/')
        mrcnn_path = os.path.join(opt.data_dir, '{}_mask.pkl'.format(img_path_parsing[-1]))
        with open(mrcnn_path, 'rb') as f:
            mrcnn_result = cPickle.load(f)

        # Promote the single saved detection to batched 1-element arrays.
        for k, v in mrcnn_result.items():
            mrcnn_result[k] = np.expand_dims(np.array(v), axis=0)
        print(mrcnn_result)

        # Only the single (highest-score) saved instance is processed.
        num_insts = 1
        f_sRT = np.zeros((num_insts, 4, 4), dtype=float)
        f_size = np.zeros((num_insts, 3), dtype=float)
        # BUG FIX: define obj_pcs before the valid_inst branch; previously a
        # frame with no valid instance hit a NameError when building 'result'.
        obj_pcs = np.zeros((num_insts, opt.n_pts, 3), dtype=float)
        # prepare frame data
        f_points, f_rgb, f_choose, f_catId, f_prior = [], [], [], [], []
        valid_inst = []
        for i in range(num_insts):
            cat_id = mrcnn_result['class_ids'][i]
            prior = mean_shapes[cat_id]
            rmin, rmax, cmin, cmax = get_bbox(mrcnn_result['rois'][i])
            rmin, rmax, cmin, cmax = int(rmin), int(rmax), int(cmin), int(cmax)
            mask = np.logical_and(mrcnn_result['masks'][i, :, :], raw_depth > 0)
            choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
            # no depth observation for background in CAMERA dataset
            # because of how we compute the bbox in function get_bbox
            # there might be a chance that no foreground points after cropping the mask
            # caused by false positive of mask_rcnn, most of the regions are background
            if len(choose) < 32:
                # Fall back to identity pose and prior-derived size.
                f_sRT[i] = np.identity(4, dtype=float)
                f_size[i] = 2 * np.amax(np.abs(prior), axis=0)
                continue
            else:
                valid_inst.append(i)
            # process objects with valid depth observation:
            # down-sample to exactly n_pts indices (or pad by wrapping).
            if len(choose) > opt.n_pts:
                c_mask = np.zeros(len(choose), dtype=int)
                c_mask[:opt.n_pts] = 1
                np.random.shuffle(c_mask)
                choose = choose[c_mask.nonzero()]
            else:
                choose = np.pad(choose, (0, opt.n_pts - len(choose)), 'wrap')
            # Back-project the sampled pixels into metric camera coordinates.
            depth_masked = raw_depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
            xmap_masked = xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
            ymap_masked = ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis]
            pt2 = depth_masked / norm_scale
            pt0 = (xmap_masked - cam_cx) * pt2 / cam_fx
            pt1 = (ymap_masked - cam_cy) * pt2 / cam_fy
            points = np.concatenate((pt0, pt1, pt2), axis=1)
            rgb = raw_rgb[rmin:rmax, cmin:cmax, :]
            rgb = cv2.resize(rgb, (opt.img_size, opt.img_size), interpolation=cv2.INTER_LINEAR)
            rgb = norm_color(rgb)
            # Re-index 'choose' into the resized crop's pixel grid.
            crop_w = rmax - rmin
            ratio = opt.img_size / crop_w
            col_idx = choose % crop_w
            row_idx = choose // crop_w
            choose = (np.floor(row_idx * ratio) * opt.img_size + np.floor(col_idx * ratio)).astype(np.int64)
            # concatenate instances
            f_points.append(points)
            f_rgb.append(rgb)
            f_choose.append(choose)
            f_catId.append(cat_id)
            f_prior.append(prior)
        if len(valid_inst):
            f_points = torch.cuda.FloatTensor(f_points)
            f_rgb = torch.stack(f_rgb, dim=0).cuda()
            f_choose = torch.cuda.LongTensor(f_choose)
            f_catId = torch.cuda.LongTensor(f_catId)
            f_prior = torch.cuda.FloatTensor(f_prior)
            # inference: deform the prior, then soft-assign observed points
            torch.cuda.synchronize()
            t_now = time.time()
            assign_mat, deltas = estimator(f_points, f_rgb, f_choose, f_catId, f_prior)
            inst_shape = f_prior + deltas
            assign_mat = F.softmax(assign_mat, dim=2)
            f_coords = torch.bmm(assign_mat, inst_shape)  # bs x n_pts x 3
            torch.cuda.synchronize()
            t_inference += (time.time() - t_now)
            f_coords = f_coords.detach().cpu().numpy()
            f_points = f_points.cpu().numpy()
            f_choose = f_choose.cpu().numpy()
            f_insts = inst_shape.detach().cpu().numpy()
            t_now = time.time()

            bs = len(valid_inst)
            # i: inverse transformation (camera frame -> object frame)
            cuda_pred_i_sR = torch.zeros([bs, 3, 3], dtype=torch.float32).cuda()
            cuda_pred_R = torch.zeros([bs, 3, 3], dtype=torch.float32).cuda()
            cuda_pred_s = torch.zeros([bs], dtype=torch.float32).cuda()
            cuda_pred_t = torch.zeros([bs, 1, 3], dtype=torch.float32).cuda()

            for i in range(len(valid_inst)):
                inst_idx = valid_inst[i]
                # Deduplicate padded/repeated indices before alignment.
                choose = f_choose[i]
                _, choose = np.unique(choose, return_index=True)
                nocs_coords = f_coords[i, choose, :]
                f_size[inst_idx] = 2 * np.amax(np.abs(f_insts[i]), axis=0)
                points = f_points[i, choose, :]
                _, _, _, pred_sRT = estimateSimilarityTransform(nocs_coords, points)
                if pred_sRT is None:
                    pred_sRT = np.identity(4, dtype=float)
                f_sRT[inst_idx] = pred_sRT

                # Factor sRT into scale s and rotation R; for row vectors,
                # (p - t) @ (R / s) maps back to NOCS.
                s = np.cbrt(np.linalg.det(pred_sRT[:3, :3]))
                R = pred_sRT[:3, :3] / s
                i_sR = R / s
                cuda_pred_i_sR[i] = torch.from_numpy(i_sR).float().cuda()
                cuda_pred_R[i] = torch.from_numpy(R).float().cuda()
                cuda_pred_s[i] = s
                cuda_pred_t[i] = torch.from_numpy(pred_sRT[:3, 3]).view(1, 3).float().cuda()

            # Transform every observed cloud into its object frame at once.
            cuda_obj_pcs = torch.bmm(torch.add(torch.cuda.FloatTensor(f_points), -cuda_pred_t), cuda_pred_i_sR)

            obj_pcs = np.zeros_like(f_points)
            for i in range(cuda_obj_pcs.shape[0]):
                obj_pc = cuda_obj_pcs[i].detach().cpu().numpy()
                assert obj_pc.shape[1] == 3
                # Discard outliers farther than 0.35 from the object centre.
                norm_pt = np.linalg.norm(obj_pc, axis=1)
                index = np.where(norm_pt < 0.35)
                obj_pc = obj_pc[index]
                if len(obj_pc) >= opt.n_pts:
                    sample_index = np.random.choice(obj_pc.shape[0], opt.n_pts, replace=False)
                else:
                    sample_index = np.random.choice(obj_pc.shape[0], opt.n_pts, replace=True)
                # BUG FIX: resample in BOTH branches. Previously the >= branch
                # computed sample_index but never applied it, causing a shape
                # mismatch whenever more than n_pts points survived filtering.
                obj_pc = obj_pc[sample_index, :]
                obj_pcs[i] = obj_pc

            t_umeyama += (time.time() - t_now)
            img_count += 1
            inst_count += len(valid_inst)

        # save results
        result = {}
        result['pred_class_ids'] = mrcnn_result['class_ids']
        result['pred_bboxes'] = mrcnn_result['rois']
        result['pred_scores'] = mrcnn_result['scores']
        result['pred_sRT'] = f_sRT
        result['pred_scales'] = f_size
        result['obj_pcs'] = obj_pcs

        if args.save_pose:
            save_path = os.path.join('{}_pose.pkl'.format(img_path))
            with open(save_path, 'wb') as f:
                cPickle.dump(result, f)

    # write statistics (guard the averages so an all-invalid run does not
    # crash with ZeroDivisionError)
    messages = []
    messages.append("Total images: {}".format(len(img_list)))
    if img_count:
        messages.append("Valid images: {},  Total instances: {},  Average: {:.2f}/image".format(
            img_count, inst_count, inst_count / img_count))
        messages.append("Inference time: {:06f}  Average: {:06f}/image".format(t_inference, t_inference / img_count))
        messages.append("Umeyama time: {:06f}  Average: {:06f}/image".format(t_umeyama, t_umeyama / img_count))
    else:
        messages.append("No valid images processed")
    messages.append("Total time: {:06f}".format(time.time() - t_start))
    for msg in messages:
        print(msg)

def show_pose(raw_rgb, pose_result, args, img_id=0):
    """Render the predicted poses onto the RGB frame via draw_detections.

    Only predictions are drawn; ground-truth and NOCS overlays are disabled.
    Class ids are shifted by +1 to match the drawer's 1-based labels.
    """
    draw_detections(
        img=raw_rgb, out_dir='./vis_results', data_name='mugs', img_id=img_id,
        intrinsics=args.intrinsics,
        pred_sRT=pose_result['pred_sRT'],
        pred_size=pose_result['pred_scales'],
        pred_class_ids=pose_result['pred_class_ids'] + 1,
        gt_sRT=None, gt_size=None, gt_class_ids=None,
        nocs_sRT=None, nocs_size=None, nocs_class_ids=None,
        draw_nocs=False, draw_gt=False,
    )

def show(args=None):
    """Visualize previously saved '*_pose.pkl' results for every frame.

    Iterates frames 0000..(img_num-1) in args.data_dir, loading each color
    image and its pickled pose result, and rendering them with
    draw_detections.

    NOTE(review): reads the module-level `intrinsics` set in __main__, not
    anything on `args` — only runnable as a script.
    """
    for idx in range(args.img_num):
        args.img_id = "%04d" % idx
        frame_path = os.path.join(args.data_dir, args.img_id)
        raw_rgb = cv2.imread(frame_path + '_color.png')[:, :, :3].astype(np.uint8)
        with open(args.data_dir + '/' + args.img_id + '_pose.pkl', 'rb') as f:
            result = cPickle.load(f)
        draw_detections(
            img=raw_rgb, out_dir='./vis_results', data_name='mugs',
            img_id=args.img_id,
            intrinsics=intrinsics,
            pred_sRT=result['pred_sRT'],
            pred_size=result['pred_scales'],
            pred_class_ids=result['pred_class_ids'] + 1,
            gt_sRT=None, gt_size=None, gt_class_ids=None,
            nocs_sRT=None, nocs_size=None, nocs_class_ids=None,
            draw_nocs=False, draw_gt=False,
        )

if __name__ == "__main__":
    # Category mean-shape priors shipped next to this script.
    mean_shapes = np.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets/mean_points_emb.npy'))

    # RealSense camera intrinsics (fx, fy, cx, cy).
    cam_fx, cam_fy = 603.1332397460938, 601.6819458007812
    cam_cx, cam_cy = 323.1732177734375, 241.5373077392578
    intrinsics = [[cam_fx, 0, cam_cx], [0, cam_fy, cam_cy], [0, 0, 1]]

    # Pixel-coordinate lookup grids for a 640x480 image:
    # xmap[r, c] == c, ymap[r, c] == r.
    ymap, xmap = np.indices((480, 640))
    norm_scale = 1000.0  # depth is stored in millimetres
    norm_color = transforms.Compose(
        [transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
    )
    opt = get_args()
    if opt.show_pose:
        show(opt)
