"""
Copyright (C) 2020 NVIDIA Corporation.  All rights reserved.
Licensed under the NVIDIA Source Code License. See LICENSE at https://github.com/nv-tlabs/lift-splat-shoot.
Authors: Jonah Philion and Sanja Fidler
"""
import json
import torch
import os
import numpy as np
from PIL import Image
import cv2
from pyquaternion import Quaternion
# from nuscenes.nuscenes import NuScenes
# from nuscenes.utils.splits import create_splits_scenes
# from nuscenes.utils.data_classes import Box
from glob import glob

from .tools import get_lidar_data, img_transform, normalize_img, gen_dx_bx

import gzip
def read_gzipped_json(file_path):
    """Read a gzip-compressed JSON file.

    Args:
        file_path (str): path to a .json.gz file.

    Returns:
        dict | list | None: the parsed JSON payload, or None when the file
        cannot be read or decoded (the original best-effort behaviour is
        preserved).

    Raises:
        FileNotFoundError: if file_path does not exist.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"文件不存在: {file_path}")

    if not file_path.endswith('.json.gz'):
        print(f"警告: 文件扩展名不是.json.gz: {file_path}")

    try:
        # 'rt' decodes the decompressed byte stream as UTF-8 text.
        with gzip.open(file_path, 'rt', encoding='utf-8') as f:
            return json.load(f)
    except (OSError, ValueError) as e:
        # Narrowed from a blanket `except Exception` so genuine bugs are no
        # longer swallowed: OSError covers I/O errors and gzip.BadGzipFile,
        # ValueError covers json.JSONDecodeError.
        print(f"读取文件时出错: {e}")
        return None

import laspy
def laz_to_nuscenes_format(file_path, min_distance=2.2, reflectance_key='intensity', time_key='gps_time'):
    """Read a LAZ file into a NuScenes-style 5xN array, dropping near-range points.

    NOTE(review): this definition is dead code — a second function with the
    same name defined later in this module shadows it, so this
    distance-filtering variant is never the one actually called. Either
    delete it or merge the min_distance behaviour into the later definition.

    Args:
        file_path (str): path to the LAZ file.
        min_distance (float): points closer than this to the origin are dropped.
        reflectance_key (str): laspy dimension used for reflectance, default 'intensity'.
        time_key (str): laspy dimension used for time, default 'gps_time'.

    Returns:
        np.ndarray: array of shape (5, N) holding x, y, z, reflectance, dt.
    """
    # Read the LAZ file (laspy transparently decompresses).
    las = laspy.read(file_path)
    
    # Euclidean distance of every point from the sensor origin.
    distances = np.sqrt(las.x**2 + las.y**2 + las.z**2)
    
    # Keep only points at or beyond min_distance (drops ego-vehicle returns).
    mask = distances >= min_distance
    filtered_x = las.x[mask]
    filtered_y = las.y[mask]
    filtered_z = las.z[mask]
    
    # Number of surviving points.
    n = len(filtered_x)
    
    # Output rows: x, y, z, reflectance, dt.
    points = np.zeros((5, n))
    
    # Fill the coordinate rows.
    points[0, :] = filtered_x
    points[1, :] = filtered_y
    points[2, :] = filtered_z
    
    # Fill the reflectance row when the dimension exists.
    if hasattr(las, reflectance_key):
        reflectance = getattr(las, reflectance_key)[mask]
        # Normalize reflectance to [0, 1]; leave as-is when the peak is 0.
        # NOTE(review): np.max raises on an empty array — assumes n > 0.
        if np.max(reflectance) > 0:
            points[3, :] = reflectance / np.max(reflectance)
        else:
            points[3, :] = reflectance
    else:
        print(f"警告: 未找到反射率维度 '{reflectance_key}'，使用零填充")
    
    # Fill the time-offset row when the dimension exists.
    if hasattr(las, time_key):
        time_values = getattr(las, time_key)[mask]
        # Offset relative to the earliest return in the sweep.
        points[4, :] = time_values - np.min(time_values)
    else:
        print(f"警告: 未找到时间维度 '{time_key}'，使用零填充")
    
    print(f"过滤后点云数量: {n} (原始: {len(las.points)}, 过滤掉: {len(las.points) - n})")
    
    return points


def laz_to_nuscenes_format(file_path, reflectance_key='intensity', time_key='gps_time', min_distance=None):
    """Read a LAZ file and convert it to a NuScenes-style (5, N) array.

    Rows are x, y, z, reflectance (scaled to [0, 1]) and dt (time offset
    from the earliest point). Missing reflectance/time dimensions are left
    zero-filled with a warning.

    Args:
        file_path (str): path to the LAZ file.
        reflectance_key (str): laspy dimension used for reflectance.
        time_key (str): laspy dimension used for time.
        min_distance (float | None): when given, points closer than this to
            the origin are dropped (folds in the behaviour of the shadowed
            filtering variant defined earlier in this module). None keeps
            every point, preserving the original behaviour.

    Returns:
        np.ndarray: array of shape (5, N).
    """
    las = laspy.read(file_path)

    x = np.asarray(las.x)
    y = np.asarray(las.y)
    z = np.asarray(las.z)

    # Optional near-range filtering (e.g. to drop ego-vehicle returns).
    if min_distance is not None:
        mask = np.sqrt(x**2 + y**2 + z**2) >= min_distance
    else:
        mask = np.ones(len(x), dtype=bool)

    n = int(mask.sum())
    points = np.zeros((5, n))
    points[0, :] = x[mask]
    points[1, :] = y[mask]
    points[2, :] = z[mask]

    if hasattr(las, reflectance_key):
        reflectance = np.asarray(getattr(las, reflectance_key))[mask]
        # Normalize to [0, 1]; guard the empty-cloud case (np.max raises on
        # empty input) and all-zero data.
        peak = np.max(reflectance) if reflectance.size else 0
        points[3, :] = reflectance / peak if peak > 0 else reflectance
    else:
        print(f"警告: 未找到反射率维度 '{reflectance_key}'，使用零填充")

    if hasattr(las, time_key):
        time_values = np.asarray(getattr(las, time_key))[mask]
        if time_values.size:
            # Offset relative to the earliest return in the sweep.
            points[4, :] = time_values - np.min(time_values)
    else:
        print(f"警告: 未找到时间维度 '{time_key}'，使用零填充")

    return points

class Box:
    """Minimal 3D bounding box, a stand-in for nuscenes.utils.data_classes.Box.

    Attributes:
        center: (3,) float array, box centre position.
        size: (3,) float array, full [length, width, height].
        rotation: quaternion-like object exposing rotate(vec) and __mul__.
    """

    def __init__(self, center, size, rotation):
        """
        Args:
            center (array-like): [x, y, z] centre position.
            size (array-like): [length, width, height].
            rotation (Quaternion): orientation quaternion.
        """
        # Force float dtype so in-place translation by float vectors cannot
        # fail with a same-kind casting error on integer input.
        self.center = np.array(center, dtype=float)
        self.size = np.array(size, dtype=float)
        self.rotation = rotation

    def translate(self, translation):
        """Shift the box centre by the given (3,) vector."""
        self.center += translation

    def rotate(self, rotation):
        """Rotate the box (centre and orientation) about the origin."""
        self.center = rotation.rotate(self.center)
        # Compose orientations: new rotation applied on the left, matching
        # the nuScenes devkit convention.
        self.rotation = rotation * self.rotation

    def bottom_corners(self):
        """Return the four bottom-face corners as a (3, 4) array.

        Bug fix: this used to return the transposed (4, 3) layout, so the
        caller's `bottom_corners()[:2].T` selected two *corners* instead of
        the x/y *rows*. The nuScenes API this mimics returns (3, 4).
        """
        half_size = self.size / 2.0

        # Bottom corners in the box frame:
        # front-right, front-left, back-left, back-right.
        corners = np.array([
            [ half_size[0],  half_size[1], -half_size[2]],
            [ half_size[0], -half_size[1], -half_size[2]],
            [-half_size[0], -half_size[1], -half_size[2]],
            [-half_size[0],  half_size[1], -half_size[2]],
        ])

        # Rotate each corner, translate to world position, transpose to (3, 4).
        rotated = np.array([self.rotation.rotate(c) for c in corners])
        return (rotated + self.center).T

class Bench2DriveData(torch.utils.data.Dataset):
    """Dataset over Bench2Drive clips.

    Expects the directory layout
    ``<data_root>/<scene>/camera/<cam>/<frame_id>.jpg``,
    ``<data_root>/<scene>/anno/<frame_id>.json.gz`` and
    ``<data_root>/<scene>/lidar/<frame_id>.laz``.
    Each sample is one frame of one scene.
    """

    def __init__(self,
                 data_root,
                 is_train,
                 data_aug_conf,
                 grid_conf):
        """
        Args:
            data_root (str | None): dataset root directory. When falsy, falls
                back to the historical hard-coded location (kept for backward
                compatibility with callers that pass None).
            is_train (bool): selects the train (first 80%) or val split.
            data_aug_conf (dict): image augmentation configuration.
            grid_conf (dict): BEV grid bounds (xbound/ybound/zbound/dbound).
        """
        # Bug fix: the parameter used to be discarded in favour of the
        # hard-coded path; honour it when provided.
        self.data_root = data_root if data_root else "/home/quzheng/wks/datasets/b2d"
        self.is_train = is_train
        self.data_aug_conf = data_aug_conf
        self.grid_conf = grid_conf

        self.scenes = self.get_scenes()
        self.ixes = self.prepro()

        # BEV grid geometry: cell size (dx), first-cell centre (bx), cell counts (nx).
        dx, bx, nx = gen_dx_bx(grid_conf['xbound'], grid_conf['ybound'], grid_conf['zbound'])
        self.dx, self.bx, self.nx = dx.numpy(), bx.numpy(), nx.numpy()

        print(self)
        # NOTE(review): the nuScenes file-path adjustment block that used to
        # follow here referenced self.nusc, which this class never creates,
        # and therefore raised AttributeError on every construction. It was
        # dead nuScenes-specific code and has been removed; Bench2Drive data
        # is addressed directly through the filesystem layout above.

    def get_scenes(self):
        """Return the scene (clip) directories for the active split."""
        # sorted() makes the 80/20 split deterministic; raw glob order is not.
        scenes = sorted(d for d in glob(os.path.join(self.data_root, '*')) if os.path.isdir(d))
        split_idx = int(0.8 * len(scenes))
        return scenes[:split_idx] if self.is_train else scenes[split_idx:]

    def prepro(self):
        """Build the flat sample index: one entry per (scene, frame) with all cameras present."""
        samples = []

        # Camera subdirectories expected under <scene>/camera/.
        cam_types = ['rgb_front_left',
                     'rgb_front',
                     'rgb_front_right',
                     'rgb_back_left',
                     'rgb_back',
                     'rgb_back_right']

        for scene in self.scenes:
            # Frame ids come from the annotation files. Bug fix: annotations
            # are read as gzipped files (*.json.gz) elsewhere in this class,
            # so matching only '*.json' could find nothing; accept both
            # spellings and deduplicate on the frame id.
            frame_ids = sorted({f.split('.')[0]
                                for f in os.listdir(os.path.join(scene, 'anno'))
                                if f.endswith(('.json', '.json.gz'))})

            for frame_id in frame_ids:
                # Keep the frame only if every camera captured it.
                if all(os.path.exists(os.path.join(scene, 'camera', cam, f"{frame_id}.jpg"))
                       for cam in cam_types):
                    samples.append({
                        'scene_path': scene,
                        'frame_id': frame_id
                    })

        return samples

    def sample_augmentation(self):
        """Sample the image augmentation parameters.

        Returns:
            tuple: (resize, resize_dims, crop, flip, rotate). Random during
            training; deterministic (centre crop, no flip/rotation) otherwise.
        """
        H, W = self.data_aug_conf['H'], self.data_aug_conf['W']
        fH, fW = self.data_aug_conf['final_dim']
        if self.is_train:
            resize = np.random.uniform(*self.data_aug_conf['resize_lim'])
            resize_dims = (int(W*resize), int(H*resize))
            newW, newH = resize_dims
            crop_h = int((1 - np.random.uniform(*self.data_aug_conf['bot_pct_lim']))*newH) - fH
            crop_w = int(np.random.uniform(0, max(0, newW - fW)))
            crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)
            # Bug fix: the condition was inverted (`False if ... else True`),
            # which flipped exactly when rand_flip was disabled or the coin
            # came up 0. Flip with probability 0.5 only when rand_flip is
            # enabled, as in the original LSS implementation.
            flip = bool(self.data_aug_conf['rand_flip'] and np.random.choice([0, 1]))
            rotate = np.random.uniform(*self.data_aug_conf['rot_lim'])
        else:
            resize = max(fH/H, fW/W)
            resize_dims = (int(W*resize), int(H*resize))
            newW, newH = resize_dims
            crop_h = int((1 - np.mean(self.data_aug_conf['bot_pct_lim']))*newH) - fH
            crop_w = int(max(0, newW - fW) / 2)
            crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)
            flip = False
            rotate = 0
        return resize, resize_dims, crop, flip, rotate

    def parse_sensor_params(self, sample):
        """Exploratory walk over one frame's annotation structure.

        NOTE(review): this method only indexes into the parsed annotation
        and returns None — it documents the Bench2Drive schema (see the key
        lists in the comments) but has no effect. Kept for reference until
        real calibration parsing is implemented.
        """
        scene_path = sample['scene_path']
        frame_id = sample['frame_id']

        anno_gz_path = os.path.join(scene_path, 'anno', f"{frame_id}.json.gz")
        anno_data = read_gzipped_json(anno_gz_path)

        # Top-level keys include: 'x', 'y', 'throttle', 'steer', 'brake',
        # 'theta', 'speed', ..., 'bounding_boxes', 'sensors', 'only_ap_brake'.
        for bounding_box in anno_data['bounding_boxes']:
            # Box keys: 'class', 'id', 'type_id', 'base_type', 'location',
            # 'rotation', 'center', 'extent', 'world_cord', 'world2ego', ...
            bounding_box['class']

        # Sensor keys: 'CAM_FRONT', 'CAM_FRONT_LEFT', ..., 'TOP_DOWN',
        # 'RADAR_FRONT', ..., 'LIDAR_TOP'.
        for sensor_name, sensor_info in anno_data['sensors'].items():
            if sensor_name[:3] == 'CAM':
                # Camera keys: 'location', 'rotation', 'intrinsic',
                # 'world2cam', 'cam2ego', 'fov', 'image_size_x', 'image_size_y'.
                sensor_info['intrinsic']
            elif sensor_name[:5] == 'RADAR':
                # Radar keys: 'location', 'rotation', 'world2radar', 'radar2ego'.
                sensor_info['radar2ego']
            elif sensor_name == 'LIDAR_TOP':
                # Lidar keys: 'location', 'rotation', 'world2lidar', 'lidar2ego'.
                sensor_info['lidar2ego']

    def get_image_data(self, sample):
        """Load, augment and normalize the camera images for one frame.

        Returns:
            tuple of six stacked tensors: images, camera rotations, camera
            translations, intrinsics, and the post-augmentation rotation /
            translation that map augmented pixel coordinates back to the
            original image.

        NOTE(review): this reads self.calibration, which is never
        initialized anywhere in this class (a load_calibration helper was
        commented out upstream), so calling it currently raises
        AttributeError. TODO: restore per-camera calibration loading, e.g.
        from the per-frame 'sensors' annotation (see parse_sensor_params).
        """
        scene_path = sample['scene_path']
        frame_id = sample['frame_id']

        imgs = []
        rotations = []
        translations = []
        intrinsics = []
        post_rotations = []
        post_translations = []

        # Camera subset for this sample (random Ncams-subset when training).
        cams = self.choose_cams()

        for cam in cams:
            # Images are addressed directly on the filesystem.
            img_path = os.path.join(scene_path, 'camera', cam, f"{frame_id}.jpg")
            img = Image.open(img_path)

            # Augmentation bookkeeping starts from the identity transform.
            post_rotation = torch.eye(2)
            post_translation = torch.zeros(2)

            # Per-camera calibration (see NOTE above).
            cam_calib = self.calibration[cam]
            intrinsic = cam_calib['intrinsic']
            rotation = cam_calib['extrinsic']['rotation']
            translation = cam_calib['extrinsic']['translation']

            # Apply the sampled augmentation, tracking the induced 2D transform.
            resize, resize_dims, crop, flip, rotate = self.sample_augmentation()
            img, aug_post_rotation, aug_post_translation = img_transform(
                img, post_rotation, post_translation,
                resize=resize,
                resize_dims=resize_dims,
                crop=crop,
                flip=flip,
                rotate=rotate
            )

            # Embed the 2D augmentation transform into 3x3 homogeneous form.
            post_translation = torch.zeros(3)
            post_rotation = torch.eye(3)
            post_translation[:2] = aug_post_translation
            post_rotation[:2, :2] = aug_post_rotation

            imgs.append(normalize_img(img))
            intrinsics.append(intrinsic)
            rotations.append(rotation)
            translations.append(translation)
            post_rotations.append(post_rotation)
            post_translations.append(post_translation)

        return (
            torch.stack(imgs),
            torch.stack(rotations),
            torch.stack(translations),
            torch.stack(intrinsics),
            torch.stack(post_rotations),
            torch.stack(post_translations)
        )

    def get_lidar_data(self, sample, nsweeps=1):
        """Load the lidar point cloud for one frame.

        Args:
            sample (dict): entry from self.ixes.
            nsweeps (int): accepted for API compatibility with the nuScenes
                loader (VizData passes it); each .laz file already contains
                a single sweep, so the value is currently unused.

        Returns:
            torch.Tensor: (3, N) xyz points; the reflectance and dt rows of
            the (5, N) source array are dropped.
        """
        scene_path = sample['scene_path']
        frame_id = sample['frame_id']
        laz_path = os.path.join(scene_path, 'lidar', f"{frame_id}.laz")
        points = laz_to_nuscenes_format(laz_path)
        return torch.Tensor(points)[:3]  # reflectance and dt are not used

    def get_binimg(self, sample):
        """Rasterize the frame's vehicle boxes into a BEV occupancy image.

        Args:
            sample (dict): entry from self.ixes.

        Returns:
            torch.Tensor: binary image of shape (1, nx[0], nx[1]).
        """
        scene_path = sample['scene_path']
        frame_id = sample['frame_id']

        anno_gz_path = os.path.join(scene_path, 'anno', f"{frame_id}.json.gz")
        anno_data = read_gzipped_json(anno_gz_path)

        # Ego pose for the world -> ego transform. NOTE(review): 'theta' is
        # used directly as radians while box 'rotation' below goes through
        # np.radians (degrees) — confirm both units against the Bench2Drive
        # annotation spec.
        ego_pose = anno_data
        trans = -np.array([ego_pose['x'], ego_pose['y'], 0])  # height ignored in BEV
        rot = Quaternion(axis=[0, 0, 1], angle=ego_pose['theta']).inverse

        # Empty BEV canvas.
        img = np.zeros((self.nx[0], self.nx[1]))

        for bbox in anno_data.get('bounding_boxes', []):
            # Skip the ego vehicle and anything that is not a car.
            if bbox.get('class') == 'ego_vehicle' or bbox.get('base_type') != 'car':
                continue

            location = np.array(bbox['location'])
            # NOTE(review): 'extent' is assumed here to be the full
            # length/width/height; if it is a half-extent (as in CARLA),
            # boxes are drawn at half size — verify.
            size = np.array(bbox['extent'])[[0, 1, 2]]
            rotation = Quaternion(axis=[0, 0, 1], angle=np.radians(bbox['rotation'][2]))

            box = Box(location, size, rotation)

            # World coordinates -> ego coordinates.
            box.translate(trans)
            box.rotate(rot)

            # Bottom-face corners projected onto the BEV grid.
            pts = box.bottom_corners()[:2].T
            pts = np.round(
                (pts - self.bx[:2] + self.dx[:2]/2.) / self.dx[:2]
            ).astype(np.int32)

            # fillPoly expects (col, row) order.
            pts[:, [1, 0]] = pts[:, [0, 1]]
            cv2.fillPoly(img, [pts], 1.0)

        return torch.Tensor(img).unsqueeze(0)

    def choose_cams(self):
        """Pick the cameras for this sample.

        During training, a random Ncams-subset of data_aug_conf['cams']
        (a 6-element list configured in train.py); otherwise all cameras.
        """
        if self.is_train and self.data_aug_conf['Ncams'] < len(self.data_aug_conf['cams']):
            cams = np.random.choice(self.data_aug_conf['cams'], self.data_aug_conf['Ncams'],
                                    replace=False)
        else:
            cams = self.data_aug_conf['cams']
        return cams

    def __str__(self):
        return f"""Bench2DriveData: {len(self)} samples. Split: {"train" if self.is_train else "val"}. \n Augmentation Conf: {self.data_aug_conf}"""

    def __len__(self):
        return len(self.ixes)


class VizData(Bench2DriveData):
    """Dataset for testing and visualisation.

    Identical to SegmentationData except that __getitem__ additionally
    returns the lidar point cloud.
    """

    def __getitem__(self, index):
        sample = self.ixes[index]

        imgs, rots, trans, intrins, post_rots, post_trans = self.get_image_data(sample)

        # Bug fix: the parent's get_lidar_data took no nsweeps argument, so
        # the old `nsweeps=3` keyword raised TypeError. Each .laz file holds
        # a single sweep, so no sweep count is needed here.
        lidar_data = self.get_lidar_data(sample)
        binimg = self.get_binimg(sample)

        return imgs, rots, trans, intrins, post_rots, post_trans, lidar_data, binimg


class SegmentationData(Bench2DriveData):
    """Dataset used for training.

    The only difference from VizData is that no lidar point cloud is
    returned.
    """

    def __init__(self, *args, **kwargs):
        super(SegmentationData, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        rec = self.ixes[index]

        # Camera tensors: imgs, rots, trans, intrins, post_rots, post_trans.
        image_data = self.get_image_data(rec)
        # BEV segmentation target.
        target = self.get_binimg(rec)

        return (*image_data, target)


def worker_random_init(x):
    """Seed numpy in each DataLoader worker; the offset keeps worker seeds distinct."""
    np.random.seed(x + 13)


def compile_data(dataroot, data_aug_conf, grid_conf, bsz, nworkers):
    """Build distributed train/val dataloaders over Bench2Drive data.

    Args:
        dataroot (str): dataset root directory, forwarded to the datasets.
        data_aug_conf (dict): image augmentation configuration.
        grid_conf (dict): BEV grid configuration.
        bsz (int): batch size for both loaders.
        nworkers (int): DataLoader worker count for both loaders.

    Returns:
        tuple: (train_loader, val_loader). The train loader uses a
        DistributedSampler, so torch.distributed must be initialized.
    """
    # Bug fix: the datasets' first parameter is named data_root, so the old
    # keyword call `VizData(dataroot=None, ...)` raised TypeError and threw
    # the caller's dataroot away. Pass it positionally instead.
    # NOTE(review): train uses VizData (returns lidar) while val uses
    # SegmentationData — this looks swapped relative to the class docstrings;
    # confirm intent.
    train_dataset = VizData(dataroot, is_train=True,
                            data_aug_conf=data_aug_conf, grid_conf=grid_conf)
    val_dataset = SegmentationData(dataroot, is_train=False,
                                   data_aug_conf=data_aug_conf, grid_conf=grid_conf)

    # Distributed training requires a custom sampler; note that a custom
    # sampler is mutually exclusive with shuffle=True.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=bsz,
                                               num_workers=nworkers,
                                               drop_last=True,
                                               sampler=train_sampler,
                                               worker_init_fn=worker_random_init)
    # worker_init_fn added for consistency with the train loader so val
    # workers are seeded deterministically too.
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=bsz,
                                             shuffle=False,
                                             num_workers=nworkers,
                                             worker_init_fn=worker_random_init)

    return train_loader, val_loader

if __name__ == "__main__":
    # Smoke test of the data pipeline with the standard LSS settings.
    max_grad_norm = 5.0   # training hyper-parameters; unused in this smoke test
    pos_weight = 2.13

    data_aug_conf = {
        'resize_lim': (0.193, 0.225),
        'final_dim': (128, 352),
        'rot_lim': (-5.4, 5.4),
        'H': 900, 'W': 1600,
        'rand_flip': True,
        'bot_pct_lim': (0.0, 0.22),
        'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
                 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'],
        'Ncams': 5,
    }

    grid_conf = {
        'xbound': [-50.0, 50.0, 0.5],
        'ybound': [-50.0, 50.0, 0.5],
        # A single cell along z: the BEV representation collapses height.
        'zbound': [-10.0, 10.0, 20.0],
        'dbound': [4.0, 45.0, 1.0],
    }

    train_loader, val_loader = compile_data(dataroot="data_path",
                                            data_aug_conf=data_aug_conf,
                                            grid_conf=grid_conf,
                                            bsz=16, nworkers=4)
