import os,sys
import json
import random
import numpy as np
import cv2
from PIL import Image
from torch.utils.data import Dataset

sys.path.append(os.getcwd())
from matplotlib import pyplot as plt
from InitPose.lib.utils import Visualize, chromatic_transform, add_noise, visualize_dataset
from InitPose.lib.metrics.pose_evaluator_init import load_model_info

def get_camera_intrinsic(folder):
    """Load camera intrinsics from ``<folder>/intrinsics.json``.

    Returns a tuple ``(intrinsics_dict, K)`` where ``K`` is the 3x3
    pinhole camera matrix built from fx, fy, ppx, ppy.
    """
    intrinsics_file = os.path.join(folder, "intrinsics.json")
    with open(intrinsics_file, "r") as f:
        camera_intrinsics = json.load(f)

    K = np.array(
        [
            [float(camera_intrinsics["fx"]), 0.0, float(camera_intrinsics["ppx"])],
            [0.0, float(camera_intrinsics["fy"]), float(camera_intrinsics["ppy"])],
            [0.0, 0.0, 1.0],
        ],
        dtype="float64",
    )
    return (camera_intrinsics, K)


class PROPSPoseDataset(Dataset):
    """Single-object 6-DoF pose dataset in a BOP-style directory layout.

    Each sample yields the normalized RGB image plus per-instance
    segmentation labels, visible bounding boxes, ground-truth poses (RTs),
    projected 2D object centers, and dense center-direction maps used for
    voting-based pose estimation.
    """

    def __init__(
        self,
        root: str,
        split: str = "train",
        id: int = 1,  # obj_id selecting which object category to load
    ) -> None:
        assert split in {"train", "test"}
        # self.base_folder = f"obj_ac/obj_{id:06d}"#todo
        self.base_folder = f"obj/obj_{id:06d}"  # TODO: make the folder scheme configurable
        self.id = id
        self.root = root
        self.split = split
        self.dataset_dir = os.path.join(self.root, self.base_folder)
        ## parameters
        self.max_instance_num = 1  # at most one object instance per image
        self.H = 480
        self.W = 640
        self.rgb_aug_prob = 0.4  # probability of chromatic/noise augmentation (train only)
        self.cam_intrinsic = get_camera_intrinsic(self.dataset_dir)[1]
        self.resolution = [640, 480]  # [W, H]

        self.all_lst = self.parse_dir()
        if self.split == "train":
            self.shuffle()
        # The model point cloud was precomputed once via parse_model() and cached:
        # self.models_pcd = self.parse_model()
        # np.save(os.path.join(self.dataset_dir, "models_pcd.npy"), self.models_pcd)
        self.models_pcd = np.load(os.path.join(self.dataset_dir, "models_pcd.npy"))
        self.models = {1: {"pts": self.models_pcd[0]}}
        self.models_info = {1: load_model_info(self.models_pcd[0])}
        self.models_symmetry = {1: 1}  # 1 if the object is symmetric, else 0

        self.obj_id_list = [1, ]
        self.classes = self.obj_id_list
        # Map raw BOP obj_id -> contiguous label index (0 is background).
        self.id2label = {id: idx + 1 for idx, id in enumerate(self.obj_id_list)}

    def parse_dir(self):
        """Index the split directory.

        Returns a list of ``(rgb_path, objs_dict)`` where ``objs_dict`` maps
        a per-image instance index to its pose (R, T), obj_id, visible bbox,
        and visible-mask path.
        """
        data_dir = os.path.join(self.dataset_dir, f"{self.split}/000001")
        if self.split == "train":
            data_dir = os.path.join(self.dataset_dir, f"{self.split}_pbr/000001")
        rgb_path = os.path.join(data_dir, "rgb")
        mask_path = os.path.join(data_dir, "mask")
        scene_gt_json = os.path.join(data_dir, "scene_gt.json")
        scene_gt_info_json = os.path.join(data_dir, "scene_gt_info.json")
        # Frames are assumed to be named 0.jpg .. N-1.jpg, in order.
        rgb_list = [f'{i}.jpg' for i in range(len(os.listdir(rgb_path)))]
        # FIX: json.load(open(...)) leaked the file handles; use context managers.
        with open(scene_gt_json) as f:
            scene_gt = json.load(f)
        with open(scene_gt_info_json) as f:
            scene_gt_info = json.load(f)
        all_lst = []
        for rgb_file in rgb_list:
            idx = int(rgb_file.split(".jpg")[0])
            scene_objs_gt = scene_gt[str(idx)]
            scene_objs_info_gt = scene_gt_info[str(idx)]
            objs_dict = {}
            for obj_idx in range(len(scene_objs_gt)):
                objs_dict[obj_idx] = {}
                objs_dict[obj_idx]["R"] = np.array(
                    scene_objs_gt[obj_idx]["cam_R_m2c"]
                ).reshape(3, 3)
                objs_dict[obj_idx]["T"] = np.array(
                    scene_objs_gt[obj_idx]["cam_t_m2c"]
                ).reshape(3, 1)
                objs_dict[obj_idx]["obj_id"] = scene_objs_gt[obj_idx]["obj_id"]
                objs_dict[obj_idx]["bbox_visib"] = scene_objs_info_gt[obj_idx]["bbox_visib"]
                # NOTE(review): mask shares the frame index, not the instance
                # index — presumably valid because max_instance_num == 1.
                objs_dict[obj_idx]["visible_mask_path"] = os.path.join(
                    mask_path, f"{idx}.jpg"
                )
            obj_sample = (os.path.join(rgb_path, rgb_file), objs_dict)
            all_lst.append(obj_sample)
        return all_lst

    def parse_model(self):
        """Sample a 1024-point cloud per object mesh (offline helper).

        Result is cached to ``models_pcd.npy``; __init__ loads the cache
        instead of calling this.
        """
        model_path = os.path.join(self.dataset_dir, "model")
        objpathdict = {
            1: [
                "obj",
                os.path.join(model_path, "model.obj"),  # TODO: parameterize mesh path
            ],
        }
        self.visualizer = Visualize(objpathdict, self.cam_intrinsic, self.resolution)
        models_pcd_dict = {
            index: np.array(self.visualizer.objnode[index]["mesh"].vertices)
            for index in self.visualizer.objnode
        }
        models_pcd = np.zeros((len(models_pcd_dict), 1024, 3))
        for m, model in models_pcd_dict.items():
            # Random vertex subsampling (with replacement) to a fixed 1024 points.
            models_pcd[m - 1] = model[np.random.randint(0, model.shape[0], 1024)]
        return models_pcd

    def __len__(self):
        """Number of indexed frames in this split."""
        return len(self.all_lst)

    def __getitem__(self, idx):
        """Build the training dict for frame ``idx``.

        Returns keys: rgb (3,H,W float32 in [0,1]), objs_id, label
        (background at channel 0), bbx ([x_min, y_min, w, h]), RTs,
        centermaps (flattened to (3*max_inst, H, W)), centers.
        """
        rgb_path, objs_dict = self.all_lst[idx]
        with Image.open(rgb_path) as im:
            rgb = np.array(im)
        # Photometric augmentation applied with probability rgb_aug_prob.
        if self.split == "train" and np.random.rand(1) > 1 - self.rgb_aug_prob:
            rgb = chromatic_transform(rgb)
            rgb = add_noise(rgb)
        rgb = rgb.astype(np.float32) / 255
        data_dict = {"rgb": rgb.transpose((2, 0, 1))}  # HWC -> CHW
        ## TODO data-augmentation of depth
        assert len(objs_dict) <= self.max_instance_num
        objs_id = np.zeros(self.max_instance_num, dtype=np.uint8)
        # label[0] = background, label[k] = mask of class k.
        label = np.zeros((self.max_instance_num + 1, self.H, self.W), dtype=bool)
        bbx = np.zeros((self.max_instance_num, 4))
        RTs = np.zeros((self.max_instance_num, 3, 4))
        centers = np.zeros((self.max_instance_num, 2))
        centermaps = np.zeros(
            (self.max_instance_num, 3, self.resolution[1], self.resolution[0])
        )
        # NOTE(review): debug visualization below writes to out/ on EVERY
        # sample fetch; consider removing for production training.
        img = cv2.imread(rgb_path)
        # FIX: loop variable renamed so it no longer shadows the sample index `idx`.
        for obj_idx in objs_dict.keys():
            if len(objs_dict[obj_idx]["bbox_visib"]) > 0:
                ## instance has a visible mask
                objs_id[obj_idx] = self.id2label[objs_dict[obj_idx]["obj_id"]]
                assert objs_id[obj_idx] > 0
                with Image.open(objs_dict[obj_idx]["visible_mask_path"]) as im:
                    label[objs_id[obj_idx]] = np.array(im, dtype=bool)
                ## [x_min, y_min, width, height]
                bbx[obj_idx] = objs_dict[obj_idx]["bbox_visib"]
                RT = np.zeros((4, 4))
                RT[3, 3] = 1
                RT[:3, :3] = objs_dict[obj_idx]["R"]
                RT[:3, [3]] = objs_dict[obj_idx]["T"]
                # RT = np.linalg.inv(RT)
                RTs[obj_idx] = RT[:3]
                # Project the object origin into the image plane.
                center_homo = self.cam_intrinsic @ RT[:3, [3]]
                center = center_homo[:2] / center_homo[2]
                # Per-pixel unit vectors pointing toward the projected center.
                x = np.linspace(0, self.resolution[0] - 1, self.resolution[0])
                y = np.linspace(0, self.resolution[1] - 1, self.resolution[1])
                xv, yv = np.meshgrid(x, y)
                dx, dy = center[0] - xv, center[1] - yv
                distance = np.sqrt(dx**2 + dy**2)
                # FIX: guard the exact center pixel where distance == 0; the
                # original 0/0 produced NaN. dx == dy == 0 there, so the unit
                # vector correctly becomes (0, 0) after the guard.
                safe_distance = np.where(distance == 0, 1.0, distance)
                nx, ny = dx / safe_distance, dy / safe_distance
                Tz = np.ones((self.resolution[1], self.resolution[0])) * RT[2, 3]
                centermaps[obj_idx] = np.array([nx, ny, Tz])
                # debug: draw the projected center (requires out/ to exist)
                img = cv2.circle(
                    img,
                    (int(center[0]), int(center[1])),
                    radius=5,
                    color=(0, 0, 255),
                    thickness=-1,
                )
                cv2.imwrite(f"out/testcenter_{self.id}.jpg", img)

                centers[obj_idx] = np.array([int(center[0]), int(center[1])])
        # Background = pixels not covered by any instance mask.
        label[0] = 1 - label[1:].sum(axis=0)

        data_dict["objs_id"] = objs_id
        data_dict["label"] = label
        data_dict["bbx"] = bbx
        data_dict["RTs"] = RTs
        data_dict["centermaps"] = centermaps.reshape(
            -1, self.resolution[1], self.resolution[0]
        )
        data_dict["centers"] = centers
        return data_dict

    def shuffle(self):
        """Shuffle the sample list in place (used for the train split)."""
        random.shuffle(self.all_lst)


if __name__ == "__main__":
    from matplotlib import pyplot as plt

    # Smoke test: load the test split for object 1 and fetch one sample.
    dataset = PROPSPoseDataset(root="./", split="test", id=1)
    print(len(dataset))
    data = dataset[0]

    # grid_vis = visualize_dataset(dataset, alpha=0.25)
    # # print(grid_vis)
    # plt.axis('off')
    # plt.imshow(grid_vis)
    # plt.savefig(f"out/PROPSPoseDataset_{dataset.id}.jpg")
    # # visualization
