import math

import numpy as np
from config.config import get_my_global_config

from env.env import EnhancedScenarioEnv
from feature_builder.feature_builder import FeatureBuilder
from model import ILformerModel

# --- Global configuration and environment/model construction ------------------
cfg = get_my_global_config()  # project-wide config dict; key layout assumed below
env_config = cfg['env']['base_config']
# cfg['env']['base_config']['no_traffic'] = True
env = EnhancedScenarioEnv(env_config)  # simulator instance used for the rollout
feature_builder = FeatureBuilder()  # converts raw observations into model features
future_step = cfg['base']['future_step']  # prediction horizon (steps)
history_step = cfg['base']['history_step']  # past steps fed to the model
num_episodes = 20  # number of evaluation episodes to roll out
# Normalization bounds; currently unused because normalization is commented out
# in convert_to_tensor -- kept for when it is re-enabled.
pos_max_val = cfg['trainer']['pos_max_val']
vel_max_val = cfg['trainer']['vel_max_val']
model_config = cfg['model']

# Imitation-learning transformer; weights are restored from a checkpoint
# further down in this script before inference.
model = ILformerModel(
    hidden_dim=model_config['hidden_dim'],
    nhead=model_config['nhead'],
    num_layers=model_config['num_layers'],
    cfg=model_config,
    future_step=future_step
)

# Zero-filled placeholder sample used to left-pad the sliding history window
# before enough real steps have been observed.  The per-feature sizes below
# mirror what FeatureBuilder.get_feature emits -- TODO confirm against
# feature_builder if those dimensions ever change.
pad_data = {
    'obs_feats': {
        name: np.zeros(size)
        for name, size in (
            ('sider_state', 120),
            ('ego_state', 6),
            ('lane_state', 120),
            ('navi_state', 22),
            ('other_v_state', 40),
            ('lidar_state', 240),
        )
    },
    'ego': {
        # position/velocity are 2-D vectors; heading is stored as a
        # (cos, sin) pair, hence also length 2.
        name: np.zeros(2)
        for name in ('velocity', 'heading', 'position')
    },
    'expert_path': {
        'velocity': np.zeros((future_step, 2)),   # per-step 2-D velocity
        'valid': np.zeros(future_step),           # per-step validity flag
        'position': np.zeros((future_step, 2)),   # per-step 2-D position
    },
}
import torch


def convert_to_tensor(window_data, ego, num_history=None):
    """Convert a sliding window of raw feature dicts into model-ready tensors.

    All spatial quantities are re-expressed in the CURRENT ego body frame:
    positions are translated and rotated, headings become the (cos, sin) of
    the heading relative to the current one, and velocities are rotated.

    Args:
        window_data: sequence of per-step dicts with keys 'obs_feats', 'ego'
            and 'expert_path'; the first ``num_history`` entries are history
            and entry ``num_history`` is the current step.
        ego: object exposing ``position`` (x, y), ``heading`` (unit vector)
            and ``velocity`` for the current step.
        num_history: number of history steps in the window.  Defaults to the
            module-level ``history_step`` from the global config, keeping the
            original two-argument call signature working unchanged.

    Returns:
        dict with float32 tensors under 'obs_feats' (one entry per feature
        key, shape (num_history + 1, feat_dim)), 'ego' ('position',
        'velocity', 'heading', each (num_history + 1, 2)) and 'expert_path'
        ('position'/'velocity' float32, 'valid' bool).
    """
    n_hist = history_step if num_history is None else num_history
    history_data = window_data[:n_hist]
    current_data = window_data[n_hist]

    current_pos = ego.position
    current_heading = np.arctan2(ego.heading[1], ego.heading[0])
    # Hoisted: the same rotation is applied to every point/vector below.
    cos_h = np.cos(current_heading)
    sin_h = np.sin(current_heading)

    def global_to_body(global_pos):
        """Translate + rotate a global point into the current body frame."""
        dx = global_pos[0] - current_pos[0]
        dy = global_pos[1] - current_pos[1]
        return [dx * cos_h + dy * sin_h, -dx * sin_h + dy * cos_h]

    def global_to_body_vector(global_vec):
        """Rotate a global-frame vector (velocity etc.) into the body frame."""
        vx, vy = global_vec[0], global_vec[1]
        return [vx * cos_h + vy * sin_h, -vx * sin_h + vy * cos_h]

    window = list(history_data) + [current_data]

    processed_history = []
    for step in window:
        # Copy so the cached window dicts are not mutated in place.
        # (Local renamed from `ego` to stop shadowing the function parameter.)
        step_ego = step['ego'].copy()
        step_ego['position'] = global_to_body(step_ego['position'])
        step_heading = np.arctan2(step_ego['heading'][1], step_ego['heading'][0])
        rel_heading = step_heading - current_heading
        step_ego['heading'] = [np.cos(rel_heading), np.sin(rel_heading)]
        step_ego['velocity'] = global_to_body_vector(step_ego['velocity'])
        processed_history.append(step_ego)

    # Expert path (positions and velocities) of the CURRENT step, body frame.
    processed_expert = [
        global_to_body(pos) for pos in current_data['expert_path']['position']
    ]
    processed_expert_vel = [
        global_to_body_vector(vel) for vel in current_data['expert_path']['velocity']
    ]

    tensor_data = {'obs_feats': {}, 'ego': {}, 'expert_path': {}}

    # Stack per-step observation features.  np.asarray first avoids the slow
    # list-of-ndarray construction path inside torch.tensor.
    for key in history_data[0]['obs_feats']:
        tensor_data['obs_feats'][key] = torch.tensor(
            np.asarray([s['obs_feats'][key] for s in window]),
            dtype=torch.float32,
        )

    # Ego kinematics over the window.  Normalization by pos_max_val /
    # vel_max_val was commented out in the original and is left off here.
    for key in ('position', 'velocity', 'heading'):
        tensor_data['ego'][key] = torch.tensor(
            [s[key] for s in processed_history], dtype=torch.float32
        )

    tensor_data['expert_path']['position'] = torch.tensor(processed_expert, dtype=torch.float32)
    tensor_data['expert_path']['velocity'] = torch.tensor(processed_expert_vel, dtype=torch.float32)
    tensor_data['expert_path']['valid'] = torch.tensor(
        current_data['expert_path']['valid'], dtype=torch.bool
    )

    return tensor_data


from collections import OrderedDict

# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# The checkpoint may be either a raw state dict or a Lightning-style wrapper
# with the weights stored under the "state_dict" key.
ckpt_path = "./ckpt/epoch=34-step=5530.ckpt"
ckpt = torch.load(ckpt_path, map_location=device)
state = ckpt.get("state_dict", ckpt)

# Strip the "model." prefix that the training wrapper prepends to every key
# so the names line up with the bare ILformerModel module.
prefix = "model."
new_state = OrderedDict(
    (key[len(prefix):] if key.startswith(prefix) else key, value)
    for key, value in state.items()
)
model.load_state_dict(new_state, strict=False)
model.eval()


def body_to_global(body_points, ego_x, ego_y, heading_rad):
    """Transform body-frame point(s) into the global frame.

    Args:
        body_points : np.ndarray | list  # single point [x, y] or an (N, 2) batch
        ego_x       : float              # ego global x coordinate
        ego_y       : float              # ego global y coordinate
        heading_rad : float              # ego heading (radians), CCW positive

    Returns:
        np.ndarray with the SAME shape as the input: (2,) for a single point,
        (N, 2) for a batch.  Fix: the previous blanket ``squeeze()`` collapsed
        an N == 1 batch from (1, 2) to (2,), breaking the documented
        shape-preservation contract; the input rank is now tracked explicitly.
    """
    points = np.asarray(body_points)
    was_single = points.ndim == 1
    if was_single:
        points = points.reshape(1, -1)

    # Standard 2-D rotation matrix for the ego heading.
    cos_h = np.cos(heading_rad)
    sin_h = np.sin(heading_rad)
    rotation_matrix = np.array([[cos_h, -sin_h],
                                [sin_h, cos_h]])

    # Rotate into the global frame, then translate by the ego position.
    global_points = points.dot(rotation_matrix.T) + np.array([ego_x, ego_y])

    # Restore the caller's shape instead of a blanket squeeze().
    return global_points[0] if was_single else global_points


def body_to_global_velocity(body_velocity, heading_rad):
    """Rotate body-frame velocity vector(s) into the global frame.

    Args:
        body_velocity : np.ndarray | list  # [vx_body, vy_body] or an (N, 2) batch
        heading_rad   : float              # ego heading (radians), CCW positive

    Returns:
        np.ndarray with the SAME shape as the input: (2,) for a single vector,
        (N, 2) for a batch.  Fix: the previous ``squeeze()`` collapsed an
        N == 1 batch from (1, 2) to (2,); the input rank is now tracked
        explicitly, mirroring body_to_global.
    """
    v_body = np.asarray(body_velocity)
    was_single = v_body.ndim == 1
    if was_single:
        v_body = v_body.reshape(1, -1)

    # Pure rotation -- velocities have no translation component.
    cos_h = np.cos(heading_rad)
    sin_h = np.sin(heading_rad)
    rotation_matrix = np.array([[cos_h, -sin_h],
                                [sin_h, cos_h]])

    v_global = v_body.dot(rotation_matrix.T)
    return v_global[0] if was_single else v_global


import numpy as np


def generate_diversity_traj(traj_mu, traj_sigma, mode, future_step):
    """Sample `mode` diverse trajectories around a predicted mean trajectory.

    y coordinates are drawn from a multivariate normal whose covariance is a
    squared-exponential (RBF) kernel over time scaled by the per-step y std
    devs, so consecutive steps stay correlated (smooth lateral variation).
    x coordinates are sampled independently per step.

    Args:
        traj_mu: (future_step, 2) array of per-step mean (x, y).
        traj_sigma: (future_step, 2) array of per-step (sigma_x, sigma_y).
        mode: number of trajectories to sample.
        future_step: number of timesteps per trajectory.

    Returns:
        list of `mode` arrays, each of shape (future_step, 2).
    """
    sigma_xs = traj_sigma[..., 0]
    sigma_ys = traj_sigma[..., 1]
    L = 5  # RBF length scale: smoothness of the temporal correlation
    N = future_step

    # Vectorized covariance build (was an O(N^2) Python double loop):
    # cov_y[i, j] = sigma_y[i] * sigma_y[j] * exp(-(i - j)^2 / (2 L^2)).
    idx = np.arange(N)
    diff = idx[:, None] - idx[None, :]
    cov_y = np.outer(sigma_ys, sigma_ys) * np.exp(-(diff ** 2) / (2 * L ** 2))

    trajectories = []
    for _ in range(mode):
        # y: correlated across time; x: independent per step.
        y_samples = np.random.multivariate_normal(mean=traj_mu[:, 1], cov=cov_y)
        x_samples = np.random.normal(loc=traj_mu[:, 0], scale=sigma_xs)
        trajectories.append(np.column_stack((x_samples, y_samples)))

    return trajectories


import colorsys


def generate_high_contrast_colors(n, s=0.65, l=0.5, alpha: float = 1.0):
    """Generate `n` high-contrast RGBA colors as (r, g, b, a) tuples.

    Hues are spaced evenly around the color wheel at fixed saturation `s`
    (0..1) and lightness `l` (0..1); `alpha` is appended to every color.
    """
    return [
        (*colorsys.hls_to_rgb(i / n, l, s), alpha)
        for i in range(n)
    ]


with torch.no_grad():
    # Closed-loop evaluation: roll each episode out with a zero action, feed
    # the sliding observation window to the model every step, and render the
    # predicted mean trajectory plus sampled diverse trajectories.
    for e in range(num_episodes):
        env.reset()
        feature_builder.clear_feature()
        feature_builder.set_env(env)
        done = False
        # 21 slots = history + current step; presumably history_step == 20
        # -- TODO confirm against cfg['base']['history_step'].
        # NOTE(review): every slot aliases the SAME pad_data dict; this is
        # safe only because padded entries are never mutated in place.
        history_data = [pad_data] * 21
        mask = [0] * 21  # 0 = padding slot, 1 = real observation
        mode = 20  # number of diverse trajectory samples drawn per step
        global_pos = []
        global_vel = []

        while not done:
            # action = env.action_space.sample()

            # Zero action: the ego coasts while we probe the model each step.
            action = np.array([0, 0], dtype=np.float32)
            o, r, tm, tc, i = env.step(action)
            data = feature_builder.get_feature(o)

            step = env.episode_step

            # Slide the window: drop the oldest step, append the newest, and
            # mark the new slot as a real (non-padded) observation.
            history_data.pop(0)
            history_data.append(data)
            mask.pop(0)
            mask.append(1)
            masks = torch.tensor(mask, dtype=torch.bool)
            masks = masks.unsqueeze(0)  # add batch dimension: (1, 21)
            # Re-express the whole window in the current ego body frame.
            tensor_data = convert_to_tensor(history_data, env.agent)
            window_data = tensor_data
            target_pos = window_data['expert_path']['position'].squeeze(1)
            target_vel = window_data['expert_path']['velocity'].squeeze(1)
            valid_mask = window_data['expert_path']['valid']  # .squeeze(1)

            obs_feats = window_data['obs_feats']
            ego_feats = window_data['ego']

            # Group features into batch-size-1 inputs the model expects.
            obs_data = {
                'sider_data': obs_feats['sider_state'].unsqueeze(0),
                'lane_data': obs_feats['lane_state'].unsqueeze(0),
                'lidar_data': obs_feats['lidar_state'].unsqueeze(0)
            }
            ego_data = {
                'ego_data': obs_feats['ego_state'].unsqueeze(0),
                'position': ego_feats['position'].unsqueeze(0),
                'heading': ego_feats['heading'].unsqueeze(0),
                'velocity': ego_feats['velocity'].unsqueeze(0)
            }
            other_data = {
                'navi_data': obs_feats['navi_state'].unsqueeze(0),
                'other_v_data': obs_feats['other_v_state'].unsqueeze(0)
            }

            # Predicted per-step mean and std dev of the future trajectory,
            # presumably in the ego body frame -- TODO confirm with model.
            traj_mu, traj_sigma = model(obs_data, ego_data, other_data, masks)
            traj_mu = (torch.squeeze(traj_mu, 0)).numpy()
            traj_sigma = (torch.squeeze(traj_sigma, 0)).numpy()
            trajs = generate_diversity_traj(traj_mu, traj_sigma, mode, future_step)
            colors = generate_high_contrast_colors(mode)  # high-contrast palette

            current_pos = env.agent.position
            current_vel = env.agent.velocity
            current_heading = env.agent.heading
            heading = math.atan2(current_heading[1], current_heading[0])

            # Convert the body-frame prediction back to global coordinates.
            global_pos = body_to_global(traj_mu, current_pos[0], current_pos[1], heading)

            global_target_pos = body_to_global(target_pos, current_pos[0], current_pos[1], heading)
            env.clear_drawer()

            env.render_trajectory(traj_mu, point_color=(0.0, 0.0, 1.0, 0.7), line_color=(0.0, 0.0, 1.0, 0.7))  # blue
            # env.render_trajectory(target_pos, point_color=(1.0, 0.0, 0.0, 0.7), line_color=(1.0, 0.0, 0.0, 0.7))  # red
            env.render_trajectory(global_pos)  # renderer default color (green per original note)
            # env.render_trajectory(global_target_pos, point_color=(1.0, 0.0, 1.0, 0.7),
            #                       line_color=(1.0, 0.0, 1.0, 0.7))  # purple
            next_pos = global_pos[0, :]  # * pos_max_val

            # NOTE(review): this loop rebinds `i`, clobbering the info dict
            # returned by env.step above (unused afterwards, so harmless).
            for i in range(mode):
                traj_i = trajs[i]
                global_traj_i = body_to_global(traj_i, current_pos[0], current_pos[1], heading)
                env.render_trajectory(global_traj_i, point_color=colors[i], line_color=colors[i])  # one color per mode
            # env.render_point(next_pos, point_color=(1.0, 0.0, 1.0, 0.7))
            # env.agent.set_position(next_pos)
            # env.agent.set_velocity(next_vel)

            # Episode ends on termination or truncation.
            done = tm or tc