#---------------------------------------------------------------------------------#
# Visual Point Cloud Forecasting enables Scalable Autonomous Driving              #
# Copyright (c) OpenDriveLab. All rights reserved.                                #
#---------------------------------------------------------------------------------#

import copy
import torch
import numpy as np
import os

# Import rdflib (optional dependency, only needed for .ttl-format nSKG data)
try:
    from rdflib import Graph
except ImportError:
    print("警告: 未安装rdflib库，无法加载.ttl格式的nSKG数据")

from mmdet.datasets import DATASETS
from nuscenes.eval.common.utils import quaternion_yaw, Quaternion
from nuscenes.utils.geometry_utils import transform_matrix
from mmcv.parallel import DataContainer as DC

from .nuscenes_vidar_dataset_template import NuScenesViDARDatasetTemplate


@DATASETS.register_module()
class NuScenesViDARDatasetV1(NuScenesViDARDatasetTemplate):
    """NuScenes visual point cloud forecasting dataset.

    Extends the ViDAR template dataset with two optional graph side-data
    sources, keyed by sample token and attached in :meth:`get_data_info`:

    * nSKG -- a knowledge graph, loaded either from a pickle file or from a
      Turtle (.ttl) RDF file (optionally merged with ontology .ttl files).
    * nSTP -- per-sample graph tensors stored as ``.pt`` files in a directory.

    When both are present for a sample, nSTP takes precedence over nSKG.
    """

    def __init__(self,
                 ann_file,
                 pipeline=None,
                 data_root=None,
                 classes=None,
                 load_interval=1,
                 modality=None,
                 box_type_3d='LiDAR',
                 filter_empty_gt=True,
                 test_mode=False,
                 use_valid_flag=False,
                 history_queue_length=None,
                 pred_history_frame_num=0,
                 pred_future_frame_num=0,
                 per_frame_loss_weight=(1.0,),
                 use_nskg=False,
                 nskg_path=None,
                 nskg_ontology_path=None,
                 use_nstp=False,
                 nstp_path=None,
                 **kwargs):
        # Keep history_queue_length on this instance only; the parent class
        # does not accept it, so it must not be forwarded.
        self.history_queue_length = history_queue_length

        super().__init__(
            ann_file=ann_file,
            pipeline=pipeline,
            data_root=data_root,
            classes=classes,
            load_interval=load_interval,
            modality=modality,
            box_type_3d=box_type_3d,
            filter_empty_gt=filter_empty_gt,
            test_mode=test_mode,
            use_valid_flag=use_valid_flag,
            **kwargs)

        # nSKG (knowledge graph) configuration.
        self.use_nskg = use_nskg
        self.nskg_path = nskg_path
        self.nskg_ontology_path = nskg_ontology_path

        # nSTP (spatio-temporal graph) configuration.
        self.use_nstp = use_nstp
        self.nstp_path = nstp_path

        # Prediction-frame bookkeeping (consumed by the model/pipeline).
        self.pred_history_frame_num = pred_history_frame_num
        self.pred_future_frame_num = pred_future_frame_num
        self.per_frame_loss_weight = per_frame_loss_weight

        # Eagerly load graph side data so get_data_info can attach it.
        if self.use_nskg and self.nskg_path is not None:
            self._load_nskg_data()

        # BUG FIX: _load_nstp_data existed but was never called, so
        # self.nstp_data was never populated and the nSTP branch in
        # get_data_info could never trigger. Mirror the nSKG guard.
        if self.use_nstp and self.nstp_path is not None:
            self._load_nstp_data()

    def _load_nskg_data(self):
        """Load nSKG data into ``self.nskg_data``.

        Supports two on-disk formats:
        * ``.ttl`` (with an ontology directory) -- parsed via rdflib and
          converted to a PyG-style dict by :meth:`_convert_rdf_to_pyg`.
        * anything else -- treated as a pickle of ``{sample_token: data}``.

        On any failure, ``self.use_nskg`` is disabled so training continues
        without nSKG rather than crashing.
        """
        self.nskg_data = {}
        if not os.path.exists(self.nskg_path):
            print(f"警告: nSKG数据路径 {self.nskg_path} 不存在")
            self.use_nskg = False
            return

        try:
            if self.nskg_path.endswith('.ttl') and self.nskg_ontology_path is not None:
                g = Graph()
                try:
                    g.parse(self.nskg_path, format='turtle')
                except Exception as e:
                    print(f"警告: TTL文件解析失败: {str(e)}")
                    self.use_nskg = False
                    return

                # Merge every ontology .ttl file into the same graph; a
                # single bad ontology file is reported but not fatal.
                if os.path.exists(self.nskg_ontology_path):
                    for onto_file in os.listdir(self.nskg_ontology_path):
                        if onto_file.endswith('.ttl'):
                            onto_path = os.path.join(self.nskg_ontology_path, onto_file)
                            try:
                                g.parse(onto_path, format='turtle')
                            except Exception as e:
                                print(f"警告: 本体文件 {onto_file} 解析失败: {str(e)}")

                print(f"成功加载nSKG数据，共 {len(g)} 个三元组")
                self.nskg_data = self._convert_rdf_to_pyg(g)
            else:
                # Non-TTL path: assume a pickled {sample_token: data} dict.
                # NOTE(review): pickle.load on an untrusted file can execute
                # arbitrary code -- only point nskg_path at trusted data.
                import pickle
                with open(self.nskg_path, 'rb') as f:
                    self.nskg_data = pickle.load(f)
                print(f"成功加载nSKG数据，共 {len(self.nskg_data)} 条记录")
        except Exception as e:
            print(f"加载nSKG数据失败: {str(e)}")
            print("继续训练，但不使用nSKG数据")
            self.use_nskg = False

    def _convert_rdf_to_pyg(self, graph):
        """Convert an RDF graph into a PyG-style data dict.

        Args:
            graph: an ``rdflib.Graph`` instance.

        Returns:
            dict: mapping ``sample_token -> {'x', 'edge_index', 'node_type'}``.
            Empty dict if PyTorch Geometric is unavailable.

        NOTE(review): the per-scene triple walk below is a stub (``pass``);
        node/edge extraction is not implemented yet, so the returned dicts
        carry empty containers.
        """
        result = {}
        try:
            # Imported only as an availability check for PyG.
            import torch_geometric as pyg

            # Collect all scenes: any subject with a 'hasToken' predicate.
            scenes = {}
            for s, p, o in graph.triples((None, None, None)):
                if str(p).endswith('hasToken'):
                    scene_uri = str(s)
                    token = str(o)
                    scenes[scene_uri] = token

            # Build one graph per scene.
            for scene_uri, token in scenes.items():
                # Node containers.
                nodes = {}
                node_types = {}
                node_features = {}

                # Edge container.
                edges = {}

                # Walk all triples related to the scene.
                for s, p, o in graph.triples((None, None, None)):
                    # Node/edge extraction logic (not yet implemented).
                    pass

                # Assemble the PyG-style data object.
                data = {
                    'x': node_features,
                    'edge_index': edges,
                    'node_type': node_types
                }

                result[token] = data

            return result
        except ImportError:
            print("警告: 未安装PyTorch Geometric库，无法转换RDF数据为图格式")
            return {}

    def get_data_info(self, index):
        """Fetch data info and attach nSKG / nSTP graph data when available.

        nSTP takes precedence: if both graphs exist for the sample, the nSKG
        entry is removed so downstream code sees exactly one graph source.
        """
        info = super().get_data_info(index)

        # Identifier used to key both graph-data dicts.
        sample_token = info.get('sample_token', None)

        # Attach nSKG data if enabled and present for this sample.
        if self.use_nskg and hasattr(self, 'nskg_data') and self.nskg_data and sample_token in self.nskg_data:
            info['nskg_graph'] = self.nskg_data[sample_token]

        # Attach nSTP data if enabled and present (preferred over nSKG).
        if self.use_nstp and hasattr(self, 'nstp_data') and self.nstp_data and sample_token in self.nstp_data:
            info['nstp_graph'] = self.nstp_data[sample_token]
            # If both exist, nSTP replaces nSKG.
            if 'nskg_graph' in info:
                del info['nskg_graph']

        return info

    def _mask_points(self, pts_list):
        """Remove points falling inside the ego-vehicle bounding rectangle.

        ``self.ego_mask`` is (x_min, y_min, x_max, y_max) in the lidar frame;
        points with x in [x_min, x_max] AND y in [y_min, y_max] are dropped.
        """
        assert self.ego_mask is not None
        # Remove points belonging to the ego vehicle.
        masked_pts_list = []
        for pts in pts_list:
            ego_mask = np.logical_and(
                np.logical_and(self.ego_mask[0] <= pts[:, 0],
                               self.ego_mask[2] >= pts[:, 0]),
                np.logical_and(self.ego_mask[1] <= pts[:, 1],
                               self.ego_mask[3] >= pts[:, 1]),
            )
            pts = pts[np.logical_not(ego_mask)]
            masked_pts_list.append(pts)
        pts_list = masked_pts_list
        return pts_list

    def union2one(self, previous_queue, future_queue):
        """Merge a history queue and a future queue into one training sample.

        The last element of ``previous_queue`` is the reference (current)
        frame; ``future_queue`` is assumed to start at that same frame.
        Computes lidar-frame transforms between every frame and the
        reference frame, rewrites can_bus into relative motion, and packs
        images, metas and aggregated points into a single dict.

        Returns None when the future queue crosses a scene boundary before
        ``1 + self.future_length`` frames are collected.
        """
        # 1. Get transformation from all frames to the current (reference) frame.
        ref_meta = previous_queue[-1]['img_metas'].data
        # Reference ego->global and global->ego transforms.
        ref_e2g_translation = ref_meta['ego2global_translation']
        ref_e2g_rotation = ref_meta['ego2global_rotation']
        ref_e2g_transform = transform_matrix(
            ref_e2g_translation, Quaternion(ref_e2g_rotation), inverse=False)
        ref_g2e_transform = transform_matrix(
            ref_e2g_translation, Quaternion(ref_e2g_rotation), inverse=True)
        ref_l2e_translation = ref_meta['lidar2ego_translation']
        ref_l2e_rotation = ref_meta['lidar2ego_rotation']
        ref_l2e_transform = transform_matrix(
            ref_l2e_translation, Quaternion(ref_l2e_rotation), inverse=False)
        ref_e2l_transform = transform_matrix(
            ref_l2e_translation, Quaternion(ref_l2e_rotation), inverse=True)

        # previous_queue[-1] is dropped because future_queue[0] is assumed to
        # be the same (reference) frame -- avoids double-counting it.
        queue = previous_queue[:-1] + future_queue
        pts_list = [each['points'].data for each in queue]
        if self.ego_mask is not None:
            pts_list = self._mask_points(pts_list)
        total_cur2ref_lidar_transform = []
        total_ref2cur_lidar_transform = []
        total_pts_list = []
        for i, each in enumerate(queue):
            meta = each['img_metas'].data

            # Store points of the current frame, tagging each point's last
            # channel with the frame index within the merged queue.
            cur_pts = pts_list[i].cpu().numpy().copy()
            cur_pts[:, -1] = i
            total_pts_list.append(cur_pts)

            # Store the transformation from the current frame to the reference frame.
            curr_e2g_translation = meta['ego2global_translation']
            curr_e2g_rotation = meta['ego2global_rotation']
            curr_e2g_transform = transform_matrix(
                curr_e2g_translation, Quaternion(curr_e2g_rotation), inverse=False)
            curr_g2e_transform = transform_matrix(
                curr_e2g_translation, Quaternion(curr_e2g_rotation), inverse=True)

            curr_l2e_translation = meta['lidar2ego_translation']
            curr_l2e_rotation = meta['lidar2ego_rotation']
            curr_l2e_transform = transform_matrix(
                curr_l2e_translation, Quaternion(curr_l2e_rotation), inverse=False)
            curr_e2l_transform = transform_matrix(
                curr_l2e_translation, Quaternion(curr_l2e_rotation), inverse=True)

            # Current lidar -> reference lidar (transposed matrices compose
            # left-to-right because points are row vectors: p @ T).
            cur_lidar_to_ref_lidar = (curr_l2e_transform.T @
                                      curr_e2g_transform.T @
                                      ref_g2e_transform.T @
                                      ref_e2l_transform.T)
            total_cur2ref_lidar_transform.append(cur_lidar_to_ref_lidar)

            # Reference lidar -> current lidar.
            ref_lidar_to_cur_lidar = (ref_l2e_transform.T @
                                      ref_e2g_transform.T @
                                      curr_g2e_transform.T @
                                      curr_e2l_transform.T)
            total_ref2cur_lidar_transform.append(ref_lidar_to_cur_lidar)

        # 2. Parse previous and future can_bus information.
        imgs_list = [each['img'].data for each in previous_queue]
        metas_map = {}
        prev_scene_token = None
        prev_pos = None
        prev_angle = None
        ref_meta = previous_queue[-1]['img_metas'].data

        # 2.2. Previous frames: rewrite can_bus as motion relative to the
        # preceding frame; the first frame of a scene becomes the origin.
        for i, each in enumerate(previous_queue):
            metas_map[i] = each['img_metas'].data

            if 'aug_param' in each:
                metas_map[i]['aug_param'] = each['aug_param']

            if metas_map[i]['scene_token'] != prev_scene_token:
                metas_map[i]['prev_bev_exists'] = False
                prev_scene_token = metas_map[i]['scene_token']
                prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])
                prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])
                # Set the original point of this motion.
                new_can_bus = copy.deepcopy(metas_map[i]['can_bus'])
                new_can_bus[:3] = 0
                new_can_bus[-1] = 0
                metas_map[i]['can_bus'] = new_can_bus
            else:
                metas_map[i]['prev_bev_exists'] = True
                tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])
                tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])
                # Compute the later waypoint.
                # To align the shift and rotate difference due to the BEV.
                new_can_bus = copy.deepcopy(metas_map[i]['can_bus'])
                new_can_bus[:3] = tmp_pos - prev_pos
                new_can_bus[-1] = tmp_angle - prev_angle
                metas_map[i]['can_bus'] = new_can_bus
                prev_pos = copy.deepcopy(tmp_pos)
                prev_angle = copy.deepcopy(tmp_angle)

            # Store ref->current lidar transform so generated BEV features
            # can be quickly aligned to the reference frame.
            metas_map[i]['ref_lidar_to_cur_lidar'] = total_ref2cur_lidar_transform[i]

        # 2.3. Future frames: collect can_bus and transforms until the scene
        # changes (a scene boundary truncates the future horizon).
        current_scene_token = ref_meta['scene_token']
        ref_can_bus = None
        future_can_bus = []
        future2ref_lidar_transform = []
        ref2future_lidar_transform = []
        for i, each in enumerate(future_queue):
            future_meta = each['img_metas'].data
            if future_meta['scene_token'] != current_scene_token:
                break

            # future_queue[i] lives at index (len(previous_queue) - 1 + i)
            # in the merged queue built above.
            future2ref_lidar_transform.append(
                total_cur2ref_lidar_transform[i + len(previous_queue) - 1]
            )  # current -> reference.
            ref2future_lidar_transform.append(
                total_ref2cur_lidar_transform[i + len(previous_queue) - 1]
            )  # reference -> current.

            # can_bus information.
            if i == 0:
                # Reference frame: zero position / yaw.
                new_can_bus = copy.deepcopy(future_meta['can_bus'])
                new_can_bus[:3] = 0
                new_can_bus[-1] = 0
                future_can_bus.append(new_can_bus)
                ref_can_bus = copy.deepcopy(future_meta['can_bus'])
            else:
                new_can_bus = copy.deepcopy(future_meta['can_bus'])

                # Position of this frame's lidar origin expressed in the
                # previous future frame's lidar coordinates.
                new_can_bus_pos = np.array([0, 0, 0, 1]).reshape(1, 4)
                ref2prev_lidar_transform = ref2future_lidar_transform[-2]
                cur2ref_lidar_transform = future2ref_lidar_transform[-1]
                new_can_bus_pos = new_can_bus_pos @ cur2ref_lidar_transform @ ref2prev_lidar_transform

                # Yaw delta relative to the previous future frame.
                new_can_bus_angle = new_can_bus[-1] - ref_can_bus[-1]
                new_can_bus[:3] = new_can_bus_pos[:, :3]
                new_can_bus[-1] = new_can_bus_angle
                future_can_bus.append(new_can_bus)
                ref_can_bus = copy.deepcopy(future_meta['can_bus'])

        # Pack everything onto the reference frame's dict.
        ret_queue = previous_queue[-1]
        ret_queue['img'] = DC(torch.stack(imgs_list), cpu_only=False, stack=True)
        ret_queue.pop('aug_param', None)

        metas_map[len(previous_queue) - 1]['future_can_bus'] = np.array(future_can_bus)
        metas_map[len(previous_queue) - 1]['future2ref_lidar_transform'] = (
            np.array(future2ref_lidar_transform))
        metas_map[len(previous_queue) - 1]['ref2future_lidar_transform'] = (
            np.array(ref2future_lidar_transform))
        metas_map[len(previous_queue) - 1]['total_cur2ref_lidar_transform'] = (
            np.array(total_cur2ref_lidar_transform))
        metas_map[len(previous_queue) - 1]['total_ref2cur_lidar_transform'] = (
            np.array(total_ref2cur_lidar_transform))

        ret_queue['img_metas'] = DC(metas_map, cpu_only=True)
        ret_queue.pop('points')
        ret_queue['gt_points'] = DC(
            torch.from_numpy(np.concatenate(total_pts_list, 0)), cpu_only=False)
        # Drop samples whose future horizon was truncated by a scene change.
        if len(future_can_bus) < 1 + self.future_length:
            return None
        return ret_queue

    def _load_nstp_data(self):
        """Load nSTP data into ``self.nstp_data``.

        Each ``.pt`` file under ``self.nstp_path`` holds one sample's graph
        tensor; the file's basename (without extension) is used as the
        sample identifier. On failure, ``self.use_nstp`` is disabled so
        training continues without nSTP data.
        """
        self.nstp_data = {}
        if not os.path.exists(self.nstp_path):
            print(f"警告: nSTP数据路径 {self.nstp_path} 不存在")
            self.use_nstp = False
            return

        try:
            import glob
            import os.path as osp

            # Gather all .pt files in the directory.
            pt_files = glob.glob(osp.join(self.nstp_path, "*.pt"))
            if not pt_files:
                print(f"警告: 在 {self.nstp_path} 中未找到.pt文件")
                self.use_nstp = False
                return

            print(f"找到 {len(pt_files)} 个nSTP数据文件")

            # Load each .pt file; a single bad file is reported, not fatal.
            for pt_file in pt_files:
                try:
                    # Sample ID is the file basename without extension.
                    sample_id = osp.splitext(osp.basename(pt_file))[0]

                    # NOTE(review): torch.load without map_location loads onto
                    # the device the tensors were saved from -- confirm files
                    # were saved on CPU, or pass map_location='cpu'.
                    graph_data = torch.load(pt_file)

                    self.nstp_data[sample_id] = graph_data

                except Exception as e:
                    print(f"加载文件 {pt_file} 失败: {str(e)}")

            print(f"成功加载 {len(self.nstp_data)} 个nSTP样本")

        except Exception as e:
            print(f"加载nSTP数据失败: {str(e)}")
            print("继续训练，但不使用nSTP数据")
            self.use_nstp = False
