import open3d as o3d
import os
import json
import sys
import threading
from os.path import join, isfile
import numpy as np
import cv2
from transforms3d.quaternions import quat2mat
from utils.bvh2joint import bvh2joint, default_end_link_trans

import urllib.request
from collections import Counter
# from parse_rosbag import parse_rosbag

pwd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(pwd, "..","Open3D", "examples", "python", "reconstruction_system"))
from initialize_config import initialize_config
 


class AzureReader(threading.Thread):
    args = {}
    reader = None
    cfg = {}
    def __init__(self, kwargs_args):
        """Open an Azure Kinect MKV recording and prepare the output layout.

        Args:
            kwargs_args: dict with keys "input" (path to the .mkv recording),
                "output" (directory that will receive extracted color/depth
                frames) and "convert_mkv" (bool flag read by run()).

        Raises:
            RuntimeError: if the MKV file cannot be opened by the reader.
        """
        threading.Thread.__init__(self)

        # Bug fix: use instance-level dicts. The class-level `args`/`cfg`
        # attributes are shared by ALL AzureReader instances, so writing
        # into them from one instance would clobber every other instance.
        self.args = {}
        self.cfg = {}
        self.args["input"] = kwargs_args["input"]
        self.args["output"] = kwargs_args["output"]
        self.args["convert_mkv"] = kwargs_args["convert_mkv"]

        # Bug fix: makedirs(exist_ok=True) also creates the color/depth
        # subdirectories when the output directory already exists (the old
        # mkdir-only path skipped them in that case and could not create
        # missing intermediate directories).
        os.makedirs(self.args["output"], exist_ok=True)
        os.makedirs(os.path.join(self.args["output"], "color"), exist_ok=True)
        os.makedirs(os.path.join(self.args["output"], "depth"), exist_ok=True)

        self.reader = o3d.io.AzureKinectMKVReader()
        self.reader.open(self.args["input"])
        if not self.reader.is_opened():
            raise RuntimeError("Unable to open file {}".format(self.args["input"]))
        
        
    # def render(self, img ,joint1_data, joint2_data, obj2world, obj_p, camera_intrinsic, camera_extrinsic):
    #     # world -> camera -> image
    #     # person1
    #     if not joint1_data is None:
    #         p = np.concatenate((joint1_data, np.ones((joint1_data.shape[0], 1))), axis=-1)  # (74, 4), in world space
    #         p = p @ camera_extrinsic.transpose(1, 0)
    #         p = p[:, :3]  # (74, 3), in camera space
    #         uv = p @ camera_intrinsic.transpose(1, 0)
    #         uv = uv[:, :2] / uv[:, 2:]  # (74, 2), in image space
    #         joint1_pixels = uv.astype(np.int32)
    #     # person2
    #     if not joint2_data is None:
    #         p = np.concatenate((joint2_data, np.ones((joint2_data.shape[0], 1))), axis=-1)  # (74, 4), in world space
    #         p = p @ camera_extrinsic.transpose(1, 0)
    #         p = p[:, :3]  # (74, 3), in camera space
    #         uv = p @ camera_intrinsic.transpose(1, 0)
    #         uv = uv[:, :2] / uv[:, 2:]  # (74, 2), in image space
    #         joint2_pixels = uv.astype(np.int32)
    #     # object
    #     if not obj2world is None:
    #         p = np.concatenate((obj_p, np.ones((obj_p.shape[0], 1))), axis=-1)  # (500, 4), in object space
    #         p = p @ obj2world.T  # (500, 4), in world space
    #         p = p @ camera_extrinsic.transpose(1, 0)
    #         p = p[:, :3]  # (500, 3), in camera space
    #         uv = p @ camera_intrinsic.transpose(1, 0)
    #         uv = uv[:, :2] / uv[:, 2:]  # (500, 2), in image space
    #         obj_pixels = uv.astype(np.int32)
    #     else:
    #         obj_pixels = None
    #     # render
    #     img = img.astype(np.uint8) 
    #     if not joint1_data is None:
    #         for p in joint1_pixels:
    #             cv2.circle(img, tuple(p), 5, (0, 0, 255), -1)
    #     if not joint2_data is None:
    #         for p in joint2_pixels:
    #             cv2.circle(img, tuple(p), 5, (0, 255, 0), -1)
    #     if not obj2world is None:
    #         for p in obj_pixels:
    #             cv2.circle(img, tuple(p), 5, (0, 255, 255), -1)
    #     return img

    def run(self):
        # convert 
        # vis = o3d.visualization.VisualizerWithKeyCallback()
        # vis_geometry_added = False
        # vis.create_window('reader', 1920, 540)

        print(
            "MKV reader initialized. Press [SPACE] to pause/start, [ESC] to exit."
        )
        
        if self.args["output"] is not None:
            metadata = self.reader.get_metadata()
            print(metadata)
            o3d.io.write_azure_kinect_mkv_metadata(
                '{}/intrinsic.json'.format(self.args["output"]), metadata)
            
            config = {
                'path_dataset': self.args["output"],
                'path_intrinsic': '{}/intrinsic.json'.format(self.args["output"])
            }
            initialize_config(config)
            with open('{}/config.json'.format(self.args["output"]), 'w') as f:
                json.dump(config, f, indent=4)
                
        idx = 0
        if self.args["convert_mkv"] is True:
            while not self.reader.is_eof():
                rgbd = self.reader.next_frame()
                if rgbd is None:
                    continue
                # if not vis_geometry_added:
                #     vis.add_geometry(rgbd)
                #     vis_geometry_added = True

                if self.args["output"] is not None:
                    color_filename = '{0}/color/{1:05d}.jpg'.format(
                        self.args["output"], idx)
                    print('Writing to {}'.format(color_filename))
                    o3d.io.write_image(color_filename, rgbd.color)

                    depth_filename = '{0}/depth/{1:05d}.png'.format(
                        self.args["output"], idx)
                    print('Writing to {}'.format(depth_filename))
                    o3d.io.write_image(depth_filename, rgbd.depth)
                    idx += 1
                # try:
                #     vis.update_geometry(rgbd)
                # except NameError:
                #     pass
                # vis.poll_events()
                # vis.update_renderer()

        self.reader.close()
        
        return 
        # vis
        
        data_dir = self.args["path"]
        
        # get rosbag
        urllib.request.urlretrieve(self.args["rosbag_url"], join(data_dir,"VTS_data.npz"))
        print("get : {}".format(join(data_dir,"VTS_data.npz")))
        
        
        VTS_data = np.load(join(data_dir,"VTS_data.npz"), allow_pickle=True)["data"].item()
        
        cam1_intrinsic = self.args["intrinsic_mat"]
        cam1_pose = np.loadtxt(self.args["camera2world_dir"])
        
        img_dir = join(self.args["output"], "color")
        dirs = os.listdir(img_dir)
        dirs.sort(key=lambda x:int(x.split(".")[0]))
        rgb1_imgs = []
        for f in dirs:
            rgb1_imgs.append(cv2.imread(join(img_dir, f)).astype(np.uint8))
            
        rgb1_timestamps = []
        with open(self.args["ts_dir"], "r") as f:
            ts = f.readlines()[0]
            rgb1_timestamps = ts.split(",")
        rgb1_timestamps = [int((i.split(".")[0] + i.split(".")[1]).ljust(19,"0")) for i in rgb1_timestamps]
        N_rgb1 = len(rgb1_timestamps)
        
        if self.cfg["vis_obj"]: 
            rigid_pose_list = VTS_data["/rigid"]
            # print(VTS_data["rigid_timestamp"][0])
            rigid_timestamps =  [x + self.args["VTS_add_time"] for x in VTS_data["rigid_timestamp"]]
            # print(rigid_timestamps[0])
            labels = VTS_data["/labels"]
            labels_flattened = [e for row in labels for e in row]
            labels_counter = Counter(labels_flattened)
            label_most, label_most_cnt = labels_counter.most_common(1)[0]
            print(label_most, label_most_cnt)
            N_rigid = len(rigid_timestamps)
            print(N_rgb1, N_rigid)
        if self.cfg["vis_person1"]:
            person1_list = VTS_data["/joints"]
            person1_timestamps = [x+self.args["VTS_add_time"] for x in VTS_data["person1_timestamp"]]
            N_person1 = len(person1_timestamps)
        if self.cfg["vis_person2"]:
            person2_list = VTS_data["/joints2"]
            person2_timestamps = [x+self.args["VTS_add_time"] for x in VTS_data["person2_timestamp"]]
            N_person2 = len(person2_timestamps)
            
        threshould = 40000000  # 40ms
        p_rigid, p_person1, p_person2, p_rgb2 = 0, 0, 0, 0

        data_list = []
        for rgb1_idx in range(N_rgb1):
            t = rgb1_timestamps[rgb1_idx]
            # obj_pose align with rgb1
            if self.cfg["vis_obj"]: 
                while (p_rigid + 1 < N_rigid) and (abs(t - rigid_timestamps[p_rigid + 1]) <= abs(t - rigid_timestamps[p_rigid])):
                    p_rigid += 1
            # person1_pose align with rgb1
            if self.cfg["vis_person1"]:
                while (p_person1 + 1 < N_person1) and (abs(t - person1_timestamps[p_person1 + 1]) <= abs(t - person1_timestamps[p_person1])):
                    p_person1 += 1
            # person2_pose align with rgb1
            if self.cfg["vis_person2"]:
                while (p_person2 + 1 < N_person2) and (abs(t - person2_timestamps[p_person2 + 1]) <= abs(t - person2_timestamps[p_person2])):
                    p_person2 += 1

            
            flag = True
            if self.cfg["vis_obj"]:
                flag &= abs(t - rigid_timestamps[p_rigid]) < threshould
            if self.cfg["vis_person1"]:
                flag &= abs(t - person1_timestamps[p_person1]) < threshould
            if self.cfg["vis_person2"]:
                flag &= abs(t - person2_timestamps[p_person2]) < threshould
            if not flag:
                print("[error in preparing paired data] wrong frame idx =", rgb1_idx)
                continue
            
            if self.cfg["vis_obj"]:
                obj2world = None
                rigid_poses = rigid_pose_list[p_rigid]
                device_names = labels[p_rigid]
                obj_label = None
            