import math
from collections import OrderedDict

import numpy as np

from config.config import get_my_global_config
from env.env import EnhancedScenarioEnv
from feature_builder.feature_builder import FeatureBuilder
from model import ILformerModel
import colorsys
import torch
import pickle
from typing import Dict

from scipy.stats import multivariate_normal
import os


def init_model(cfg, device, ckpt_path="./ckpt/epoch=35-step=5688.ckpt"):
    """Build an ILformerModel and load weights from a Lightning checkpoint.

    Args:
        cfg: global config dict; uses cfg['base']['future_step'] and cfg['model'].
        device: ``map_location`` for ``torch.load``. NOTE(review): the model
            itself is never moved to ``device`` — it stays on CPU, which the
            ``.numpy()`` calls downstream appear to rely on; confirm intent.
        ckpt_path: checkpoint file to load. Defaults to the previously
            hard-coded path, so existing callers are unaffected.

    Returns:
        The model in eval mode with the checkpoint weights loaded (non-strict).
    """
    future_step = cfg['base']['future_step']
    ckpt = torch.load(ckpt_path, map_location=device)
    # Lightning checkpoints nest weights under "state_dict"; a raw state dict
    # is used as-is.
    state = ckpt.get("state_dict", ckpt)

    model_config = cfg['model']
    model = ILformerModel(
        hidden_dim=model_config['hidden_dim'],
        nhead=model_config['nhead'],
        num_layers=model_config['num_layers'],
        cfg=model_config,
        future_step=future_step
    )

    # Strip the Lightning "model." prefix so keys match the bare module.
    new_state = OrderedDict(
        (k[len("model."):] if k.startswith("model.") else k, v)
        for k, v in state.items()
    )
    # strict=False tolerates missing/unexpected keys (e.g. auxiliary heads).
    model.load_state_dict(new_state, strict=False)
    model.eval()

    return model


def init_data(future_step=0, history_step=21):
    """Create zero-filled placeholder ("pad") step data.

    Args:
        future_step: when non-zero, also attach a zeroed ``expert_path`` of
            that many future steps and return a whole padded history window.
        history_step: number of padded history entries to return in that case.

    Returns:
        If ``future_step == 0``: a single pad dict carrying ``mean``/``sigma``
        placeholders of shape (20, 2).
        Otherwise: ``(history_data, mask)`` — ``history_step`` pad dicts and a
        matching mask of zeros (0 marks padding).
    """

    def _make_pad():
        # Build a FRESH dict each time. The original replicated one dict with
        # list multiplication, so every padded slot aliased the same object —
        # a latent shared-mutation bug.
        return {
            'obs_feats': {
                'sider_state': np.zeros(120),   # assumed 120 elements
                'ego_state': np.zeros(6),       # assumed 6 elements
                'lane_state': np.zeros(120),    # assumed 120 elements
                'navi_state': np.zeros(22),     # assumed 22 elements
                'other_v_state': np.zeros(40),  # assumed 40 elements
                'lidar_state': np.zeros(240),   # assumed 240 elements
            },
            'ego': {
                'velocity': np.zeros(2),  # assumed 2-D vector
                'heading': np.zeros(2),   # stored as a 2-D direction vector
                'position': np.zeros(2),  # assumed 2-D vector
            },
        }

    if future_step != 0:
        history_data = []
        for _ in range(history_step):
            pad = _make_pad()
            pad['expert_path'] = {
                'velocity': np.zeros((future_step, 2)),  # assumed 2-D vectors
                'valid': np.zeros(future_step),          # per-step validity flag
                'position': np.zeros((future_step, 2)),  # assumed 2-D vectors
            }
            history_data.append(pad)
        mask = [0] * history_step
        return history_data, mask

    pad = _make_pad()
    pad['mean'] = np.zeros((20, 2))
    pad['sigma'] = np.zeros((20, 2))
    return pad


def body_to_global(body_points, ego_x, ego_y, heading_rad):
    """Transform points from the vehicle body frame to the global frame.

    Args:
        body_points: point(s) in the body frame — array-like of shape (N, 2)
            or a single point of shape (2,).
        ego_x: vehicle's current global x coordinate.
        ego_y: vehicle's current global y coordinate.
        heading_rad: vehicle heading in radians, counter-clockwise positive.

    Returns:
        np.ndarray of global-frame points with the same shape as the input
        (a single point comes back squeezed to shape (2,)).
    """
    pts = np.asarray(body_points)
    single = pts.ndim == 1
    if single:
        pts = pts[np.newaxis, :]

    # Standard 2-D rotation by the heading angle.
    c, s = np.cos(heading_rad), np.sin(heading_rad)
    rot = np.array([[c, -s],
                    [s, c]])

    # Rotate into the global orientation, then translate by the ego position.
    transformed = pts @ rot.T + np.array([ego_x, ego_y])

    return transformed.squeeze()


def generate_diversity_traj(traj_mu, traj_sigma, mode, future_step, smooth):
    """Sample diverse candidate trajectories from a per-step Gaussian.

    Args:
        traj_mu: (N, 2) mean positions per future step.
        traj_sigma: (N, 2) per-step standard deviations (x, y).
        mode: number of trajectories to sample.
        future_step: N, the number of future steps.
        smooth: unused — kept for interface compatibility with callers.

    Returns:
        (trajectories, probs): ``mode`` arrays of shape (N, 2) and their
        probability densities under the sampling distribution.

    BUG FIX: the original built ``cov`` as a 2x2 array whose entries were
    length-N arrays (a ragged/object array), while ``mu`` is 2N-dimensional —
    ``multivariate_normal`` needs a (2N, 2N) covariance. We now interleave the
    per-step variances onto a proper (2N, 2N) diagonal.
    """
    N = future_step

    # Interleave variances to match the mean layout: x0, y0, x1, y1, ...
    var = np.empty(2 * N)
    var[0::2] = np.asarray(traj_sigma)[:, 0] ** 2
    var[1::2] = np.asarray(traj_sigma)[:, 1] ** 2
    cov = np.diag(var)

    # Sanity check: a diagonal of squares should never be negative.
    eigvals = np.linalg.eigvals(cov)
    if np.any(eigvals < -1e-8):
        print("Warning: covariance matrix is not positive semidefinite!")

    # Full interleaved mean vector (2N,).
    mu = np.empty(2 * N)
    mu[0::2] = np.asarray(traj_mu)[:, 0]
    mu[1::2] = np.asarray(traj_mu)[:, 1]

    # allow_singular: zero-sigma entries (e.g. padded steps) would otherwise
    # raise a LinAlgError.
    mvn = multivariate_normal(mean=mu, cov=cov, allow_singular=True)

    trajectories = []
    probs = []
    for _ in range(mode):
        # Sample a full 2N-dim trajectory and score it.
        sample = mvn.rvs()
        prob = mvn.pdf(sample)

        # Reshape back to N trajectory points.
        trajectories.append(np.asarray(sample).reshape(N, 2))
        probs.append(prob)

    return trajectories, probs


def generate_high_contrast_colors(n, s=0.65, l=0.5, alpha: float = 1.0):
    """Generate ``n`` high-contrast colors as RGBA tuples.

    Hues are spaced evenly around the color wheel at fixed saturation and
    lightness, so neighboring colors are maximally distinguishable.

    Args:
        n: number of colors to produce.
        s: saturation in [0, 1].
        l: lightness in [0, 1].
        alpha: opacity appended as the fourth channel.

    Returns:
        List of ``(r, g, b, alpha)`` tuples with components in [0, 1].
    """
    palette = []
    for idx in range(n):
        r, g, b = colorsys.hls_to_rgb(idx / n, l, s)
        palette.append((r, g, b, alpha))
    return palette


def tensor_to_numpy_no_batch(obs_data, ego_data, other_data):
    """Drop the leading batch dimension of every tensor and convert to numpy.

    Non-tensor values pass through unchanged; the three dicts are returned in
    the same order they were given.
    """

    def _strip(value):
        if isinstance(value, torch.Tensor):
            return value.squeeze(0).cpu().numpy()
        return value

    converted = tuple(
        {key: _strip(val) for key, val in mapping.items()}
        for mapping in (obs_data, ego_data, other_data)
    )
    return converted


def process_data(window_data, ego, history_step):
    """Convert one window of per-step feature dicts into batched torch tensors.

    Args:
        window_data: list of step dicts. ``window_data[:history_step]`` are the
            history steps and ``window_data[history_step]`` is the current
            step. Each step carries 'obs_feats' (dict of 1-D feature arrays)
            and 'ego' (position/heading/velocity); the current step also
            carries 'expert_path' (position/velocity/valid).
        ego: agent object exposing ``.position``, ``.heading`` (unit direction
            vector) and ``.velocity`` in the global frame.
        history_step: number of history steps preceding the current one.

    Returns:
        ``(obs_data, ego_data, other_data)`` — dicts of float32 tensors, each
        with a leading batch dimension of 1 and a time dimension of
        ``history_step + 1``. All ego quantities are re-expressed in the
        current body frame.

    Cleanups vs. the original: the loop variable no longer shadows the ``ego``
    parameter; the unused ``current_vel_global`` local and the dead
    ``target_pos``/``target_vel``/``valid_mask`` locals were removed.
    """
    history_data = window_data[:history_step]
    current_data = window_data[history_step]

    current_pos = ego.position
    current_heading = np.arctan2(ego.heading[1], ego.heading[0])

    def global_to_body(global_pos):
        """Rotate/translate a global position into the current body frame."""
        dx = global_pos[0] - current_pos[0]
        dy = global_pos[1] - current_pos[1]
        x_body = dx * np.cos(current_heading) + dy * np.sin(current_heading)
        y_body = -dx * np.sin(current_heading) + dy * np.cos(current_heading)
        return [x_body, y_body]

    def global_to_body_vector(global_vec, heading):
        """Rotate a global-frame vector (velocity/acceleration) into the body frame."""
        vx = global_vec[0]
        vy = global_vec[1]
        vx_body = vx * np.cos(heading) + vy * np.sin(heading)
        vy_body = -vx * np.sin(heading) + vy * np.cos(heading)
        return [vx_body, vy_body]

    # Ego state for every step (history + current), relative to the current pose.
    processed_history = []
    for step in history_data + [current_data]:
        step_ego = step['ego'].copy()

        # Position -> body frame.
        step_ego['position'] = global_to_body(step_ego['position'])

        # Heading -> relative heading, stored as a (cos, sin) direction vector.
        step_heading = np.arctan2(step_ego['heading'][1], step_ego['heading'][0])
        rel_heading = step_heading - current_heading
        step_ego['heading'] = [np.cos(rel_heading), np.sin(rel_heading)]

        # Global velocity -> body-frame velocity.
        step_ego['velocity'] = global_to_body_vector(step_ego['velocity'], current_heading)

        processed_history.append(step_ego)

    # Expert path (positions + velocities) in the body frame. NOTE: these
    # tensors are assembled for completeness but are not part of the returned
    # dicts.
    processed_expert = [
        global_to_body(pos) for pos in current_data['expert_path']['position']
    ]
    processed_expert_vel = [
        global_to_body_vector(vel, current_heading)
        for vel in current_data['expert_path']['velocity']
    ]

    tensor_data = {'obs_feats': {}, 'ego': {}, 'expert_path': {}}

    # Stack each observation feature over time: (history_step + 1, feat_dim).
    for key in history_data[0]['obs_feats']:
        tensor_data['obs_feats'][key] = torch.tensor(
            [step['obs_feats'][key] for step in history_data + [current_data]],
            dtype=torch.float32
        )

    # Stack ego quantities over time: (history_step + 1, 2).
    for key in ['position', 'velocity', 'heading']:
        tensor_data['ego'][key] = torch.tensor(
            [step[key] for step in processed_history], dtype=torch.float32
        )

    tensor_data['expert_path']['position'] = torch.tensor(processed_expert, dtype=torch.float32)
    tensor_data['expert_path']['velocity'] = torch.tensor(processed_expert_vel, dtype=torch.float32)
    tensor_data['expert_path']['valid'] = torch.tensor(
        current_data['expert_path']['valid'], dtype=torch.bool
    )

    obs_feats = tensor_data['obs_feats']
    ego_feats = tensor_data['ego']

    # Group the features the way the model consumes them, adding batch dim 1.
    obs_data = {
        'sider_data': obs_feats['sider_state'].unsqueeze(0),
        'lane_data': obs_feats['lane_state'].unsqueeze(0),
        'lidar_data': obs_feats['lidar_state'].unsqueeze(0)
    }
    ego_data = {
        'ego_data': obs_feats['ego_state'].unsqueeze(0),
        'position': ego_feats['position'].unsqueeze(0),
        'heading': ego_feats['heading'].unsqueeze(0),
        'velocity': ego_feats['velocity'].unsqueeze(0)
    }
    other_data = {
        'navi_data': obs_feats['navi_state'].unsqueeze(0),
        'other_v_data': obs_feats['other_v_state'].unsqueeze(0)
    }
    return obs_data, ego_data, other_data


def process_episode_data(all_data, history_step, future_step):
    """Slice an episode into fixed-size (history, current, future) windows.

    Args:
        all_data: list of per-step data dicts for one episode.
        history_step: number of history slots per window.
        future_step: number of future slots per window.

    Returns:
        One entry per step: ``{'data': window, 'mask': mask}`` where ``window``
        has ``history_step + 1 + future_step`` elements and ``mask`` holds 0
        for padded slots and 1 for real steps.
    """
    num_steps = len(all_data)
    windowed_data = []
    for i in range(num_steps):
        # Clamp the history start and future end to the episode boundaries.
        history_start = max(0, i - history_step)
        future_end = min(num_steps, i + 1 + future_step)

        history_data = all_data[history_start:i]   # real history steps
        future_data = all_data[i + 1:future_end]   # real future steps

        # How many slots fall outside the episode and need padding.
        history_padding = history_step - len(history_data)
        future_padding = future_step - len(future_data)

        # Mask layout mirrors the window: the current step is always real (1).
        mask = [0] * history_padding + [1] * len(history_data) + [1] + [1] * len(future_data) + [0] * future_padding

        # One FRESH pad object per slot. The original multiplied a single
        # init_data() result, so all padded slots aliased the same dict.
        history_data = [init_data() for _ in range(history_padding)] + history_data
        future_data = future_data + [init_data() for _ in range(future_padding)]

        window = history_data + [all_data[i]] + future_data
        windowed_data.append({'data': window, 'mask': mask})

    return windowed_data


def filter_data(data, mean, sigma):
    """Assemble a compact per-step record for saving.

    Keeps only the predicted distribution parameters plus the step's
    observation features and ego state.

    Args:
        data: step dict containing at least 'obs_feats' and 'ego'.
        mean: predicted trajectory mean.
        sigma: predicted trajectory standard deviation.

    Returns:
        Dict with keys 'mean', 'sigma', 'obs_feats' and 'ego'.
    """
    return {
        'mean': mean,
        'sigma': sigma,
        'obs_feats': data['obs_feats'],
        'ego': data['ego'],
    }


def save_step_data(step_data: Dict, episode_dir: str, step_id: int):
    """Pickle one step's data to ``<episode_dir>/step<step_id>.pkl``."""
    target = f"{episode_dir}/step{step_id}.pkl"
    with open(target, 'wb') as fh:
        pickle.dump(step_data, fh)


def main():
    """Roll out episodes in the scenario environment, query the model each
    step for a predicted trajectory distribution, render the mean trajectory,
    and (when `save` is set) dump windowed per-step data at episode end.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    save = True  # dump windowed episode data to disk when an episode ends
    cfg = get_my_global_config()
    env_config = cfg['env']['base_config']
    env = EnhancedScenarioEnv(env_config)

    # NOTE(review): the model is built on CPU (init_model never calls .to(device));
    # the .detach().numpy() calls below would fail on CUDA tensors — confirm that
    # CPU inference is intended here.
    model = init_model(cfg, device)
    feature_builder = FeatureBuilder()

    history_step = cfg['base']['history_step']
    future_step = cfg['base']['future_step']
    mode = cfg['base']['mode']
    smooth = cfg['base']['smooth']

    for e in range(10):  # run 10 episodes
        env.reset()
        feature_builder.clear_feature()
        feature_builder.set_env(env)

        # Zero-padded history window and matching all-zero validity mask.
        history_data, mask = init_data(future_step)

        done = False

        all_data = []
        while not done:
            # Step the environment with a fixed no-op action [0, 0];
            # presumably the scenario replays recorded motion — confirm.
            o, r, tm, tc, i = env.step([0, 0])
            done = tm or tc

            env.clear_drawer()
            data = feature_builder.get_feature(o)

            # Slide the history window: drop the oldest step, append the
            # newest, and mark the new slot as valid (1) in the mask.
            history_data.pop(0)
            history_data.append(data)
            mask.pop(0)
            mask.append(1)
            masks = torch.tensor(mask, dtype=torch.bool)
            masks = masks.unsqueeze(0)

            obs_data, ego_data, other_data = process_data(history_data, env.agent, history_step)

            # Model outputs mean and sigma of the predicted trajectory
            # distribution (batch dim first).
            traj_mu, traj_sigma = model(obs_data, ego_data, other_data, masks)

            traj_mu = torch.squeeze(traj_mu, 0).detach().numpy()
            traj_sigma = torch.squeeze(traj_sigma, 0).detach().numpy()

            # trajs, probs = generate_diversity_traj(traj_mu, traj_sigma, mode, future_step, smooth)
            # colors = generate_high_contrast_colors(mode)  # high-contrast palette

            current_pos = env.agent.position
            current_vel = env.agent.velocity
            current_heading = env.agent.heading
            # Heading stored as a (cos, sin) direction vector; recover the angle.
            heading = math.atan2(current_heading[1], current_heading[0])

            # Transform the predicted mean trajectory from the body frame to
            # global coordinates for rendering.
            global_pos = body_to_global(traj_mu, current_pos[0], current_pos[1], heading)
            env.render_trajectory(global_pos)  # green

            # for i in range(mode):
            #     traj_i = trajs[i]
            #     global_traj_i = body_to_global(traj_i, current_pos[0], current_pos[1], heading)
            #     env.render_trajectory(global_traj_i, point_color=colors[i], line_color=colors[i])
            #     print("轨迹概率",probs[i])

            all_data.append(filter_data(data, traj_mu, traj_sigma))
            if done and save:
                # Re-window the full episode and persist one pickle per step.
                window_data = process_episode_data(all_data, history_step, future_step)
                episode_dir = f"human_steps/episode{e}"
                os.makedirs(episode_dir, exist_ok=True)  # Ensure episode directory exists
                for idx, data in enumerate(window_data):
                    save_step_data(data, episode_dir, idx)


if __name__ == '__main__':
    main()
