from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# import pickle
import glob
import json
import numpy as np
import cv2
from pyquaternion import Quaternion

# Root of the recorded roadside dataset (one timestamped recording session).
DATA_PATH = '/home/junqi/2023-02-27-17-39-45/'

# DEBUG = True
DEBUG = False  # when True, draw 2D/3D boxes per frame instead of just converting
# VAL_PATH = DATA_PATH + 'training/label_val/'
import os

SPLITS = ['3dop', 'subcnn']  # NOTE(review): unused in this script -- presumably KITTI split names
import _init_paths
from utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y,rot_y2alpha
from utils.ddd_utils import draw_box_3d, unproject_2d_to_3d


'''
#Values    Name      Description
----------------------------------------------------------------------------
   1    type         Describes the type of object: 'Car', 'Van', 'Truck',
                     'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
                     'Misc' or 'DontCare'
   1    truncated    Float from 0 (non-truncated) to 1 (truncated), where
                     truncated refers to the object leaving image boundaries
   1    occluded     Integer (0,1,2,3) indicating occlusion state:
                     0 = fully visible, 1 = partly occluded
                     2 = largely occluded, 3 = unknown
   1    alpha        Observation angle of object, ranging [-pi..pi]
   4    bbox         2D bounding box of object in the image (0-based index):
                     contains left, top, right, bottom pixel coordinates
   3    dimensions   3D object dimensions: height, width, length (in meters)
   3    location     3D object location x,y,z in camera coordinates (in meters)
   1    rotation_y   Rotation ry around Y-axis in camera coordinates [-pi..pi]
   1    score        Only for results: Float, indicating confidence in
                     detection, needed for p/r curves, higher is better.
'''

# c = np.array(
#     [[438.08785552, 0., 605.85738792, 0.],
#      [0., 436.08672017, 390.9484255, 0.],
#      [0., 0., 1., 0.]])
# c = np.array([[4.3498150431641488e+02, 0.000000e+00, 6.0874976734530969e+02,0.0],
#       [0.000000e+00, 4.3434032708470619e+02, 3.1742539643161552e+02,0.0],
#       [0.000000e+00, 0.000000e+00, 1.000000e+00,0.0]])
# 605.8573879241806 438.0878555229591

def _bbox_to_coco_bbox(bbox):
    return [(bbox[2]), (bbox[3]),
            abs(bbox[0] - bbox[2]), abs(bbox[1] - bbox[3])]
    # return [(bbox[0]), (bbox[1]),
    #         (bbox[2]), (bbox[3])]

def _bbox_to_coco_center(bbox):
    return [bbox[2] + abs((bbox[0] - bbox[2]) / 2.0), bbox[3] + abs((bbox[1] - bbox[3]) / 2.0)]
def transform_matrix(translation: np.ndarray = np.array([0, 0, 0]),
                     rotation: Quaternion = Quaternion([1, 0, 0, 0]),
                     inverse: bool = False) -> np.ndarray:
    """
    Build a 4x4 homogeneous transformation matrix from a pose.

    :param translation: <np.float32: 3>. Translation in x, y, z.
    :param rotation: Rotation in quaternions (w ri rj rk).
    :param inverse: Whether to compute the inverse transform matrix.
    :return: <np.float32: 4, 4>. Transformation matrix.
    """
    matrix = np.eye(4)

    if not inverse:
        matrix[:3, :3] = rotation.rotation_matrix
        matrix[:3, 3] = np.transpose(np.array(translation))
    else:
        # Inverse pose: R^T in the rotation slot, R^T @ (-t) in the translation slot.
        rot_t = rotation.rotation_matrix.T
        matrix[:3, :3] = rot_t
        matrix[:3, 3] = rot_t.dot(np.transpose(-np.array(translation)))

    return matrix

def read_clib(calib_path):
    """Read the 3x4 camera projection matrix from the first line of a calib file.

    The first line has the form ``<tag> v0 v1 ... v11``: the leading tag token
    is skipped and the remaining 12 floats are reshaped to a 3x4 matrix.

    Fixes over the previous version: the file handle is now closed (``with``),
    and ``str.strip()`` replaces ``line[:-1]`` -- the latter silently chopped
    the last digit when the file had no trailing newline.

    :param calib_path: path to the calibration text file.
    :return: <np.float32: 3, 4> projection matrix, or None for an empty file.
    """
    with open(calib_path, 'r') as f:
        for line in f:
            calib = np.array(line.strip().split(' ')[1:], dtype=np.float32)
            return calib.reshape(3, 4)
    # Explicit fallback (the old version fell off the end and returned None too).
    return None


# KITTI-style category names; ids are assigned 1-based in this order.
cats = ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck', 'Person_sitting',
        'Tram', 'Misc', 'DontCare']

# nuScenes-style attribute labels mapped to integer ids (0 = no attribute).
ATTRIBUTE_TO_ID = {
    '': 0,
    'cycle.with_rider': 1,
    'cycle.without_rider': 2,
    'pedestrian.moving': 3,
    'pedestrian.standing': 4,
    'pedestrian.sitting_lying_down': 5,
    'vehicle.moving': 6,
    'vehicle.parked': 7,
    'vehicle.stopped': 8,
}

# Category name -> 1-based category id.
cat_ids = {name: idx + 1 for idx, name in enumerate(cats)}

# COCO "categories" section: one {'name', 'id'} entry per category.
cat_info = [{'name': name, 'id': idx + 1} for idx, name in enumerate(cats)]

# Directory layout under DATA_PATH (one folder per sensor / annotation type).
# image_set_path = DATA_PATH + 'split/'
cam_dir = DATA_PATH + 'camera/'  # camera images; front frames live in cam_dir + 'front/'
ann_dir = DATA_PATH + 'label/'  # per-frame KITTI-style label .txt files
calib_dir = DATA_PATH + 'calib/'  # per-frame calib: line 0 = 3x4 projection, line 1 = 4x4 extrinsic
radar_dir = DATA_PATH + 'radar/'  # per-frame radar point .txt files (one header line, then points)
# train_image_dir = 'samples/CAM_FRONT/'
# Relative image path prefix written into the output annotation JSON.
train_image_dir = 'samples/CAM_FRONT_tjf/'
# splits = ['train']
# NOTE(review): both splits scan the same glob below, so 'train' and 'val'
# produce identical output files -- confirm this is intentional.
splits = ['train', 'val']
# splits = ['trainval', 'test']
calib_type = {'train': 'training', 'val': 'training', 'trainval': 'training',
              'test': 'testing'}  # unused here -- presumably kept from a KITTI-style converter


# split_path = DATA_PATH+"/annotations"
# if not os.path.exists(split_path):
#     os.mkdir(split_path)

# Main conversion loop: for every split, walk all front-camera frames and emit
# one COCO-style JSON with 'images' (incl. calib + radar points), 'annotations'
# and 'categories'.
for split in splits:
    ret = {'images': [], 'annotations': [], "categories": cat_info}
    # image_set = open(image_set_path + '{}.txt'.format(split), 'r')
    # NOTE(review): every split globs the same directory, so the splits produce
    # identical JSON; glob order is also filesystem-dependent (unsorted).
    image_set = glob.glob(cam_dir + "/front/*.png")
    # print(image_set)
    # exit()
    image_to_id = {}
    radar_pc_list = []
    frame_id = 0
    for k, line in enumerate(image_set):
        # Read the first image once to get the frame size; all frames are
        # assumed to share the same resolution.
        if k == 0:
            img = cv2.imread(line)
            sp = img.shape[0:2]
            img_width = sp[1]
            img_height = sp[0]

        frame_id += 1
        # Reduce the full path to the bare frame id (filename stem).
        # NOTE(review): breaks if any directory component contains a '.'.
        line = line.split('.')[0].split('/')[-1]
        image_id = line

        # Complex coordinate transform. This will take time to understand.
        # Identity ego-pose / sensor-pose placeholders (no pose data recorded).
        pose_record_trans = np.array([0, 0, 0])
        pose_record_rot = np.array([1, 0, 0, 0])
        cs_record_trans = np.array([0, 0, 0])
        cs_record_rot = np.array([1, 0, 0, 0])
        # global_from_car = transform_matrix(pose_record['translation'],
        #                                    Quaternion(pose_record['rotation']),
        #                                    inverse=False)
        # car_from_sensor = transform_matrix(cs_record['translation'],
        #                                    Quaternion(cs_record['rotation']),
        #                                    inverse=False)
        # with open(calib_dir + '/camera/front.json', 'r') as f:
        #     calib = json.load(f)
        # Per-frame calibration: line 0 holds the 3x4 camera projection matrix,
        # line 1 the 4x4 extrinsic (radar -> camera, per the usage below).
        # NOTE(review): file handle 'f' is never closed.
        calib_path = calib_dir + '{}_front.txt'.format(line)
        f = open(calib_path, 'r')
        lines = f.readlines()
        for i, line1 in enumerate(lines):
            if i == 0:
                calib = np.array(line1[:-1].split(' ')[1:], dtype=np.float32)
                calib = calib.reshape(3, 4)
            if i == 1:
                extrinsic = np.fromstring(line1.strip().split(' ', 1)[1], sep=' ', dtype=np.float32)
                extrinsic = extrinsic.reshape(4, 4)
        # calib = read_clib(calib_path)

        # Parse this frame's radar points (header line skipped) into an 18-column
        # per-point layout -- presumably mirroring the nuScenes radar point
        # format; confirm the exact channel meanings.
        radar_path = radar_dir + '{}.txt'.format(line)
        f = open(radar_path, 'r')
        lines = f.readlines()
        radar_pc = np.zeros((len(lines)-1, 18))
        lines = lines[1:]
        for i, line_ in enumerate(lines):
            try:
                values = np.fromstring(line_, dtype=float, sep=' ')
                # print(len(values))
                if len(values) != 9:
                    raise ValueError(f"Line {i + 1} does not contain 9 values: {line_}")
                # Keep the first 4 raw fields, then zero-pad out to 18 columns.
                values = values[:4]
                values = np.insert(values, 2, [0] * 4)
                values = np.insert(values, 8, [0] * 10)
                values[4] = i + 1  # per-frame point index (1-based)
                # TODO: transform the millimeter-wave radar coordinates into the camera frame
                # NOTE: 'point' is a view into 'values', so point[3] = 1 also
                # writes values[3] = 1 before the transform result is stored.
                point = values[:4]
                point[3] = 1
                point_homogeneous = np.dot(extrinsic, point)
                values[:3] = point_homogeneous[:3] / point_homogeneous[3]

                radar_pc[i, :] = values
            except ValueError as e:
                # Malformed lines are reported and left as all-zero rows.
                print(f"Error parsing line {i + 1}: {e}")
        # radar_pc = radar_pc[1:]
        radar_pc_trans = radar_pc.T  # stored transposed: (18, num_points)
        # radar_pc_list.append(radar_pc_trans.tolist())
        # for i, line in enumerate(f):
        #radar_pc = np.zeros((len(lines), 4))
        # One COCO 'images' record per frame; calib, extrinsic, placeholder
        # poses and the radar point cloud are all embedded in the record.
        image_info = {'id': frame_id,
                      'file_name': train_image_dir + '{}.png'.format(line),
                      'calib': calib.tolist(),
                      'video_id': 0,
                      'frame_id': frame_id,
                      'sensor_id': 1,
                      'trans_matrix': extrinsic.tolist(),
                      'velocity_trans_matrix': extrinsic.tolist(),
                      'width': img_width,
                      'height': img_height,
                      'pose_record_trans': pose_record_trans.tolist(),
                      'pose_record_rot': pose_record_rot.tolist(),
                      'cs_record_trans': cs_record_trans.tolist(),
                      'cs_record_rot': cs_record_rot.tolist(),
                      'radar_pc': radar_pc_trans.tolist(),
                      'camera_intrinsic': calib.tolist()}
        ret['images'].append(image_info)

        # KITTI-style label file for this frame: one object per line.
        # NOTE(review): 'anns' is never closed.
        ann_path = ann_dir + '2023-02-27-17-39-45_{}_front.txt'.format(line)
        anns = open(ann_path, 'r')
        #
        if DEBUG:
            print(cam_dir + 'front/' + image_info['file_name'])
            # NOTE(review): file_name already contains the 'samples/...' prefix,
            # so this concatenated path looks wrong for the on-disk layout -- verify.
            image = cv2.imread(
                cam_dir + 'front/' + image_info['file_name'])

        for ann_ind, txt in enumerate(anns):
            # KITTI label columns: type trunc occl alpha bbox(4) dim(3) loc(3) ry
            tmp = txt[:-1].split(' ')
            cat_id = cat_ids[tmp[0]]
            truncated = int(float(tmp[1]))
            occluded = int(tmp[2])
            # alpha = float(tmp[3])
            bbox = [float(tmp[4]), float(tmp[5]), float(tmp[6]), float(tmp[7])]
            # NOTE: the CenterFusion source saves dims in this order (wjq)
            # h, w, l is the order used by our roadside data
            dim = [float(tmp[8]), float(tmp[9]), float(tmp[10])]
            #dim = [float(tmp[10]), float(tmp[9]), float(tmp[8])]
            location = [float(tmp[11]), float(tmp[12]), float(tmp[13])]
            rotation_y = float(tmp[14])
            # if "front" in calib_path:
            #     rotation_y -= 1.57
            # Recompute alpha from rotation_y (the label's own alpha column is
            # ignored), then round-trip back to rotation_y for consistency.
            x = (bbox[2] + bbox[0]) / 2
            alpha = rot_y2alpha(rotation_y, x, calib[0, 2], calib[0, 0])
            # print(alpha)
            # print(rotation_y)
            rotation_y = alpha2rot_y(alpha, x, calib[0, 2], calib[0, 0])
            # print(rotation_y)
            # print("===
            # TODO 1: the current labeling tool does not annotate object state,
            # so 'attribute' is always empty here; it can be added later.
            # TODO 2: this label version carries no track_id; once the next
            # version adds it, the camera 3D-box velocity can be computed from
            # track_id plus the timestamps in the filenames
            # (cf. nusc.box_velocity(box.token)).
            att = ''
            velocity = np.array([np.nan, np.nan, np.nan])
            velocity_cam = np.array([np.nan, np.nan, np.nan])
            ann = {'image_id': frame_id,
                   'id': int(len(ret['annotations']) + 1),
                   'category_id': cat_id,
                   'dim': dim,
                   'location': location,
                   'depth': location[2],
                   'occluded': occluded,
                   'truncated': truncated,
                   'rotation_y': rotation_y,
                   'amodel_center': _bbox_to_coco_center(bbox),
                   'iscrowd': 0,
                   'track_id': 0,
                   'attribute': ATTRIBUTE_TO_ID[att],
                   'bbox': _bbox_to_coco_bbox(bbox),
                   'alpha': alpha,
                   'area': (abs(bbox[0] - bbox[2]) * abs(bbox[1] - bbox[3])),
                   'velocity': velocity.tolist(),
                   'velocity_cam': velocity_cam.tolist()}
            ret['annotations'].append(ann)
            # DEBUG visualization: draw the 2D box and the projected 3D box,
            # and sanity-check the 2D->3D unprojection against the label.
            if DEBUG and tmp[0] != 'DontCare':
                # print(rotation_y)
                x = (bbox[2] + bbox[0]) / 2
                # alpha = rot_y2alpha(rotation_y, x, c[0, 2], c[0, 0])
                # rotation_y = alpha2rot_y(alpha, x, c[0, 2], c[0, 0])
                # print('alpha', alpha,)
                # print('alpha2rot_y',rotation_y, )
                cv2.rectangle(image, (int(bbox[2]), int(bbox[3])), (int(bbox[0]), int(bbox[1])), (1, 0, 0), 2)

                box_3d = compute_box_3d(dim, location, rotation_y)
                box_2d = project_to_image(box_3d, calib)
                if box_3d.mean()==0:
                    continue
                # print('box_2d', box_2d)
                image = draw_box_3d(image, box_2d)

                # '''
                # print('rot_y, alpha2rot_y, dlt', tmp[0],
                #       rotation_y, alpha2rot_y(alpha, x, calib[0, 2], calib[0, 0]),
                #       np.cos(
                #         rotation_y - alpha2rot_y(alpha, x, calib[0, 2], calib[0, 0])))
                # '''
                # print('alpha, rot_y2alpha',alpha, rot_y2alpha(rotation_y, x, calib[0, 2], calib[0, 0]),)

                depth = np.array([location[2]], dtype=np.float32)
                # NOTE(review): pt_2d mixes bbox indices ((x2+y2)/2, (x1+y1)/2)
                # rather than forming a true (cx, cy) center -- looks like a
                # bug in this debug check; confirm before relying on it.
                pt_2d = np.array([(bbox[2] + bbox[3]) / 2, (bbox[0] + bbox[1]) / 2],
                                 dtype=np.float32)
                pt_3d = unproject_2d_to_3d(pt_2d, depth, calib)
                pt_3d[1] += dim[0] / 2
                print('pt_3d', pt_3d)
                print('location', location)
        if DEBUG:
            cv2.imshow('image', image)
            cv2.waitKey()


    print("# images: ", len(ret['images']))
    print("# annotations: ", len(ret['annotations']))
    # import pdb; pdb.set_trace()
    # Write the per-split JSON; the annotations directory is created on demand.
    out_path = '{}/annotations/roadside_{}.json'.format(DATA_PATH, split)
    out_dir = '{}/annotations/'.format(DATA_PATH)
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    print(out_path)
    # NOTE(review): the output file handle is never closed explicitly.
    json.dump(ret, open(out_path, 'w'))
