import os
import pickle

import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np

from config.config import get_my_global_config


class DPODataset(Dataset):
    def __init__(self,
                 data_dir: str,
                 history_step: int,
                 future_step: int,
                 ):
        """
        Dataset of per-step pickle files laid out as ``<data_dir>/<episode>/<step>.pkl``.

        Each sample is one window of ``history_step`` past steps, the current
        step, and ``future_step`` future steps, converted into tensors
        expressed in the current step's body (ego) frame.

        :param data_dir: root directory containing one sub-directory per episode
        :param history_step: number of past time steps in each window
        :param future_step: number of future time steps in each window
        """
        self.data_dir = data_dir
        self.history_step = history_step
        self.future_step = future_step
        # Sort for a deterministic idx -> sample mapping: os.listdir order is
        # filesystem-dependent and would otherwise vary across runs/platforms.
        self.episode_dirs = sorted(
            d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))
        )
        self.step_files = self._get_step_files()

    def _get_step_files(self):
        """Collect the paths of every ``.pkl`` step file across all episodes."""
        step_files = []
        for episode_dir in self.episode_dirs:
            episode_path = os.path.join(self.data_dir, episode_dir)
            # Sorted for the same determinism reason as episode_dirs.
            for step_file in sorted(os.listdir(episode_path)):
                if step_file.endswith('.pkl'):
                    step_files.append(os.path.join(episode_path, step_file))
        return step_files

    def __len__(self):
        """Return the number of step samples in the dataset."""
        return len(self.step_files)

    def __getitem__(self, idx):
        """
        Load the step file at *idx* and return ``(tensor_data, mask_tensor)``.

        :param idx: sample index into ``self.step_files``
        :return: tuple of (dict of tensors from :meth:`_convert_to_tensor`,
            bool tensor of per-step validity flags)
        """
        step_file = self.step_files[idx]
        # NOTE(review): pickle.load is unsafe on untrusted input; these files
        # are assumed to come from this project's own data pipeline — confirm.
        with open(step_file, 'rb') as f:
            step_data = pickle.load(f)

        window_data = step_data['data']  # list of per-step dicts for the window
        mask = step_data['mask']         # per-step validity flags, aligned with window_data

        window_data_tensor = self._convert_to_tensor(window_data, mask)
        mask_tensor = torch.tensor(mask, dtype=torch.bool)

        return window_data_tensor, mask_tensor

    def _convert_to_tensor(self, window_data, mask):
        """
        Convert one window of step dicts into a dict of float32 tensors in the
        current step's body frame.

        :param window_data: sequence of ``history_step + 1 + future_step`` step
            dicts; each carries an ``'ego'`` dict (``position``, ``heading`` as
            a [cos, sin]-style direction vector, ``velocity``) and
            ``'obs_feats'``; the current step additionally carries ``'mean'``
            and ``'sigma'`` (presumably per-mode trajectory statistics — TODO
            confirm against the producer).
        :param mask: per-step validity flags aligned with ``window_data``
        :return: dict with keys ``'obs_feats'``, ``'ego'``, ``'mean'``,
            ``'sigma'`` and ``'traj'``
        """
        history_data = window_data[:self.history_step]   # past steps
        current_data = window_data[self.history_step]    # current step
        future_data = window_data[self.history_step + 1:]  # future steps

        current_ego = current_data['ego']
        current_pos = current_ego['position']
        # Heading is stored as a direction vector; recover the angle.
        current_heading = np.arctan2(current_ego['heading'][1], current_ego['heading'][0])

        def global_to_body(global_pos):
            """Translate/rotate a global position into the current body frame."""
            dx = global_pos[0] - current_pos[0]
            dy = global_pos[1] - current_pos[1]
            x_body = dx * np.cos(current_heading) + dy * np.sin(current_heading)
            y_body = -dx * np.sin(current_heading) + dy * np.cos(current_heading)
            return [x_body, y_body]

        def global_to_body_vector(global_vec, heading):
            """Rotate a global-frame vector (velocity/acceleration) into the body frame."""
            vx = global_vec[0]
            vy = global_vec[1]
            vx_body = vx * np.cos(heading) + vy * np.sin(heading)
            vy_body = -vx * np.sin(heading) + vy * np.cos(heading)
            return [vx_body, vy_body]

        window_steps = history_data + [current_data]

        processed_history = []
        for step in window_steps:
            ego = step['ego'].copy()  # shallow copy: don't mutate the loaded window

            # Position into the body frame.
            ego['position'] = global_to_body(ego['position'])

            # Heading relative to the current heading, re-encoded as [cos, sin].
            step_heading = np.arctan2(ego['heading'][1], ego['heading'][0])
            rel_heading = step_heading - current_heading
            ego['heading'] = [np.cos(rel_heading), np.sin(rel_heading)]

            # Velocity rotated from the global frame into the body frame.
            ego['velocity'] = global_to_body_vector(ego['velocity'], current_heading)

            processed_history.append(ego)

        processed_future_pos = []
        for i in range(self.future_step):
            step_i = self.history_step + i + 1

            if mask[step_i]:
                # Read-only access: unlike before, the loaded step dict is not
                # mutated in place.
                processed_future_pos.append(global_to_body(future_data[i]['ego']['position']))
            else:
                if i == 0:
                    # BUGFIX: the current position is the body-frame ORIGIN.
                    # Previously the raw global current_pos was used here,
                    # mixing global coordinates into a body-frame trajectory.
                    pos = [0.0, 0.0]
                else:
                    # Hold the last valid (or padded) position.
                    pos = processed_future_pos[i - 1]
                processed_future_pos.append(pos)

        tensor_data = {'obs_feats': {}, 'ego': {}}

        # Stack obs_feats over history + current steps, one tensor per key.
        # Keys are taken from the first window step so history_step == 0 is
        # handled (history_data[0] would raise IndexError).
        obs_feats_tensor = {}
        for key in window_steps[0]['obs_feats']:
            obs_feats_tensor[key] = torch.tensor(
                [step['obs_feats'][key] for step in window_steps],
                dtype=torch.float32
            )
        tensor_data['obs_feats'] = obs_feats_tensor

        # Stack the body-frame ego fields over history + current steps.
        for key in ['position', 'velocity', 'heading']:
            values = [step[key] for step in processed_history]
            tensor_data['ego'][key] = torch.tensor(values, dtype=torch.float32)

        # Current step's "mean"/"sigma" blocks, converted via one numpy array
        # (shape depends on the producer, e.g. list of (20, 2) ndarrays).
        tensor_data['mean'] = torch.from_numpy(
            np.asarray(current_data['mean'], dtype=np.float32))
        tensor_data['sigma'] = torch.from_numpy(
            np.asarray(current_data['sigma'], dtype=np.float32))

        # Body-frame future trajectory; invalid steps hold the last valid
        # position (or the origin when none exists yet).
        tensor_data['traj'] = torch.from_numpy(
            np.asarray(processed_future_pos, dtype=np.float32))
        return tensor_data


if __name__ == '__main__':
    # Usage example:
    def create_dataloader(data_dir, history_step, future_step, batch_size=32, shuffle=True):
        """Build a DataLoader over a DPODataset rooted at *data_dir*."""
        dataset = DPODataset(data_dir=data_dir, history_step=history_step, future_step=future_step)
        return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)


    cfg = get_my_global_config()
    # Example: build a dataloader over the demo directory.
    data_dir = "human_steps"  # root directory with one sub-directory per episode
    base_cfg = cfg['base']
    dataloader = create_dataloader(data_dir, base_cfg['history_step'], base_cfg['future_step'])

    # Iterate the dataloader and dump each batch.
    for batch_idx, (window_data, mask) in enumerate(dataloader, start=1):
        print(f"Batch {batch_idx}:")
        print("Window data:", window_data)
        print("Mask:", mask)
