from typing import Tuple

import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.strategies import DDPStrategy
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchmetrics import MeanSquaredError
from scipy.stats import multivariate_normal
import matplotlib.patches as patches
from config.config import get_my_global_config

import matplotlib.pyplot as plt
from collections import OrderedDict
# 在代码中导入
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean

from dpo_datamoduel import DPODataModule
from model import ILformerModel
import torch
from torch.distributions import MultivariateNormal
import torch.distributions as dist


class DPOLightningModel(pl.LightningModule):
    """DPO (Direct Preference Optimization) fine-tuning of a trajectory model.

    The policy model is trained with every parameter frozen except its
    trajectory decoder; the reference model is fully frozen and only supplies
    baseline log-probabilities for the DPO objective. Preferred ("chosen")
    trajectories are the recorded human trajectories; rejected ones are the
    dataset-provided prior means.
    """

    def __init__(self, policy_model, reference_model, history_step, future_step):
        """
        Args:
            policy_model: model to fine-tune; only ``traj_decoder.*`` params stay trainable.
            reference_model: frozen copy of the pretrained model used as DPO baseline.
            history_step: number of observed history steps per window.
            future_step: number of predicted future steps per window.
        """
        super().__init__()
        self.policy_model = self._freeze_encoder_except_decoder(policy_model)
        self.reference_model = self._freeze_model(reference_model)
        self.reference_model.eval()
        self.history_step = history_step
        self.future_step = future_step
        # Kinematic feasibility bounds -- assumed m/s^2 and 1/m; TODO confirm units.
        self.max_accel = 2
        self.max_curvature = 5

    def _freeze_model(self, model):
        """Freeze all parameters of ``model`` and return it."""
        for param in model.parameters():
            param.requires_grad = False
        return model

    def _freeze_encoder_except_decoder(self, model):
        """Freeze every parameter whose name is not under ``traj_decoder``."""
        for name, param in model.named_parameters():
            param.requires_grad = name.startswith("traj_decoder")
        return model

    def _kinematic_constraint_loss(self, traj, dt=0.1):
        """Penalize kinematically infeasible predicted trajectories.

        Args:
            traj: predicted positions, shape [B, T, 2] (ego frame).
            dt: time delta between consecutive trajectory points (seconds).

        Returns:
            Scalar tensor: weighted sum of acceleration, curvature and
            heading-rate violations; zero when all constraints hold.
        """
        B, T, _ = traj.shape

        def _mean(x):
            # Guard against empty tensors (short horizons T < 4 would make
            # heading_change empty and .mean() would return NaN).
            return x.mean() if x.numel() > 0 else traj.new_zeros(())

        # Velocity: first-order finite difference, [B, T-1, 2].
        velocity = torch.diff(traj, dim=1) / dt
        # Acceleration: second-order difference, [B, T-2, 2].
        acceleration = torch.diff(velocity, dim=1) / dt
        accel_norm = torch.norm(acceleration, dim=2)  # [B, T-2]
        accel_violation = F.relu(accel_norm - self.max_accel)

        # Curvature from first and second derivatives of x(t), y(t).
        dx = torch.diff(traj[:, :, 0], dim=1)  # [B, T-1]
        dy = torch.diff(traj[:, :, 1], dim=1)  # [B, T-1]
        ddx = torch.diff(dx, dim=1)  # [B, T-2]
        ddy = torch.diff(dy, dim=1)  # [B, T-2]
        # Align the first derivatives with the second derivatives (drop last).
        dx = dx[:, :-1]  # [B, T-2]
        dy = dy[:, :-1]  # [B, T-2]
        # kappa = (x'y'' - y'x'') / (x'^2 + y'^2)^(3/2); epsilon avoids /0.
        denominator = torch.pow(dx ** 2 + dy ** 2 + 1e-6, 1.5)
        curvature = (dx * ddy - dy * ddx) / denominator  # [B, T-2]
        curvature_violation = F.relu(torch.abs(curvature) - self.max_curvature)

        # Heading-rate constraint: successive heading change <= pi/6 rad.
        heading = torch.atan2(dy, dx)  # [B, T-2]
        heading_change = torch.diff(heading, dim=1)  # [B, T-3]
        heading_violation = F.relu(torch.abs(heading_change) - np.pi / 6)

        return (
                0.5 * _mean(accel_violation) +
                0.3 * _mean(curvature_violation) +
                0.2 * _mean(heading_violation)
        )

    def _process_batch(self, batch):
        """Unpack a dataloader batch into model inputs.

        Returns:
            (mean, sigma, traj, obs_data, ego_data, other_data, masks) where
            ``traj`` is the recorded human trajectory (the "chosen" sample)
            and ``mean``/``sigma`` parameterize the dataset prior used as the
            "rejected" sample.
        """
        window_data, masks = batch
        mean = window_data['mean']
        sigma = window_data['sigma']
        traj = window_data['traj']

        obs_feats = window_data['obs_feats']
        ego_feats = window_data['ego']

        # Group the raw features into the three input dicts the model expects.
        obs_data = {
            'sider_data': obs_feats['sider_state'],
            'lane_data': obs_feats['lane_state'],
            'lidar_data': obs_feats['lidar_state']
        }
        ego_data = {
            'ego_data': obs_feats['ego_state'],
            'position': ego_feats['position'],
            'heading': ego_feats['heading'],
            'velocity': ego_feats['velocity']
        }
        other_data = {
            'navi_data': obs_feats['navi_state'],
            'other_v_data': obs_feats['other_v_state']
        }

        return mean, sigma, traj, obs_data, ego_data, other_data, masks

    def _preference_loss(
            self,
            policy_chosen_logps: torch.Tensor,
            policy_rejected_logps: torch.Tensor,
            reference_chosen_logps: torch.Tensor,
            reference_rejected_logps: torch.Tensor,
            beta: float,
            label_smoothing: float = 0.0,
            reference_free: bool = False
    ):
        """Core DPO loss (Rafailov et al., 2023) with optional label smoothing.

        Args:
            policy_chosen_logps / policy_rejected_logps: log p under the policy.
            reference_chosen_logps / reference_rejected_logps: log p under the
                frozen reference model.
            beta: inverse-temperature of the implicit reward.
            label_smoothing: probability mass assigned to the flipped preference.
            reference_free: if True, ignore the reference log-ratio entirely.

        Returns:
            losses: (batch_size,) per-sample losses.
            chosen_rewards: (batch_size,) implicit reward of the chosen sample.
            rejected_rewards: (batch_size,) implicit reward of the rejected sample.
        """
        pi_logratios = policy_chosen_logps - policy_rejected_logps
        ref_logratios = reference_chosen_logps - reference_rejected_logps

        if reference_free:
            ref_logratios = 0

        logits = pi_logratios - ref_logratios

        losses = -torch.nn.functional.logsigmoid(beta * logits) * (1 - label_smoothing) \
                 - torch.nn.functional.logsigmoid(-beta * logits) * label_smoothing

        # Rewards are detached: they are logged diagnostics, not loss terms.
        chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps).detach()
        rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps).detach()

        return losses, chosen_rewards, rejected_rewards

    def _compute_trajectory_probability(self, delta_x, delta_y, traj, mean, sigma, future_mask, min_sigma=1e-4):
        """Log-probability of ``traj`` under per-step diagonal 2-D Gaussians.

        Each time step is an independent N(mean_t, diag(sigma_t^2)); masked
        steps contribute zero. The density is converted to a discretized
        probability by adding log(delta_x * delta_y) per valid step.

        Args:
            delta_x, delta_y: discretization cell size (meters).
            traj: evaluated trajectory, [B, T, 2].
            mean: Gaussian means, [B, T, 2].
            sigma: Gaussian std-devs, [B, T, 2]; clamped below at ``min_sigma``.
            future_mask: validity mask over future steps, [B, T].

        Returns:
            [B] tensor of per-sample summed log-probabilities.
        """
        B, T, _ = traj.shape
        delta_area = delta_x * delta_y

        sigma_clipped = torch.clamp(sigma, min=min_sigma)  # [B, T, 2]

        # Diagonal covariance per point: diag([sigma_x^2, sigma_y^2]) -> [B, T, 2, 2].
        cov_matrices = torch.zeros(B, T, 2, 2, device=traj.device, dtype=traj.dtype)
        var = sigma_clipped ** 2
        cov_matrices[:, :, 0, 0] = var[:, :, 0]
        cov_matrices[:, :, 1, 1] = var[:, :, 1]

        # Flatten to (B*T, ...) so a single distribution batch covers all points.
        mean_flat = mean.reshape(B * T, 2)
        traj_flat = traj.reshape(B * T, 2)
        cov_flat = cov_matrices.reshape(B * T, 2, 2)
        mask_flat = future_mask.reshape(B * T)  # (B*T,)

        # Only evaluate log_prob at valid points. squeeze(-1) (not squeeze())
        # so a single valid point keeps a 1-D index tensor instead of
        # collapsing to 0-dim, which would break the batched indexing below.
        valid_indices = mask_flat.nonzero(as_tuple=False).squeeze(-1)
        valid_mean = mean_flat[valid_indices]
        valid_traj = traj_flat[valid_indices]
        valid_cov = cov_flat[valid_indices]

        mvn = dist.MultivariateNormal(valid_mean, covariance_matrix=valid_cov)
        log_probs = mvn.log_prob(valid_traj)  # (num_valid_points,)

        # Scatter the valid log-probs back into a dense (B*T,) tensor.
        all_log_probs = torch.zeros(B * T, device=traj.device, dtype=traj.dtype)
        all_log_probs[valid_indices] = log_probs

        # Reshape to (B, T) and sum over time.
        log_probs_per_sample = all_log_probs.reshape(B, T).sum(dim=1)

        # Number of valid time steps per sample (clamped to avoid zero).
        valid_counts = future_mask.sum(dim=1).clamp(min=1)

        # Density -> probability: add log(cell area) once per valid step.
        log_probs_per_sample += valid_counts * torch.log(torch.tensor(delta_area, device=traj.device))

        return log_probs_per_sample

    def _compute_loss(self, batch: Tuple, stage: str) -> torch.Tensor:
        """Full training objective: DPO + MSE + kinematic + smoothness terms.

        Also logs scalar diagnostics and, every 10 training steps, a
        TensorBoard figure of target vs. predicted trajectory with 1-sigma
        uncertainty ellipses.
        """
        mean, sigma, traj, obs_data, ego_data, other_data, masks = self._process_batch(batch)

        history_mask = masks[:, :self.history_step + 1]
        future_mask = masks[:, self.history_step + 1:]

        # Preference pair: human trajectory is preferred over the prior mean.
        accept_traj = traj
        reject_traj = mean

        policy_mean, policy_sigma = self.policy_model(obs_data, ego_data, other_data, history_mask)
        with torch.no_grad():
            ref_mean, ref_sigma = self.reference_model(obs_data, ego_data, other_data, history_mask)
        # Discretization cell for converting densities to probabilities (meters).
        delta_x = 0.1
        delta_y = 0.1

        policy_chosen_logps = self._compute_trajectory_probability(delta_x, delta_y, accept_traj,
                                                                   policy_mean,
                                                                   policy_sigma, future_mask)
        policy_rejected_logps = self._compute_trajectory_probability(delta_x, delta_y, reject_traj,
                                                                     policy_mean,
                                                                     policy_sigma, future_mask)
        reference_chosen_logps = self._compute_trajectory_probability(delta_x, delta_y, accept_traj,
                                                                      ref_mean,
                                                                      ref_sigma, future_mask)
        reference_rejected_logps = self._compute_trajectory_probability(delta_x, delta_y, reject_traj,
                                                                        ref_mean,
                                                                        ref_sigma, future_mask)

        # Masked MSE between predicted mean and the preferred trajectory.
        pos_loss = F.mse_loss(
            accept_traj,
            policy_mean,
            reduction='none'
        ) * future_mask.unsqueeze(-1)
        pos_loss = pos_loss.sum() / (future_mask.sum() + 1e-6)

        # NOTE(review): label_smoothing=0.5 weights chosen/rejected symmetrically,
        # which heavily dampens the preference signal -- confirm intended.
        dpo_losses, chosen_rewards, rejected_rewards = self._preference_loss(
            policy_chosen_logps, policy_rejected_logps,
            reference_chosen_logps, reference_rejected_logps,
            beta=0.5, label_smoothing=0.5, )

        # Fraction of samples where the implicit reward ranks the pair correctly.
        reward_accuracies = (chosen_rewards > rejected_rewards).float()

        margins = (chosen_rewards - rejected_rewards)

        # Kinematic feasibility penalty on the predicted mean trajectory.
        kinematic_loss = self._kinematic_constraint_loss(policy_mean)

        # Smoothness: penalize displacement between points two steps apart.
        smooth_loss = F.mse_loss(policy_mean[:, 2:], policy_mean[:, :-2]) * 0.1

        # Weighted combination of all terms.
        total_loss = (
                dpo_losses.mean() +
                0.3 * pos_loss +
                0.5 * kinematic_loss +
                0.1 * smooth_loss
        )

        # Scalar diagnostics for quick trend inspection.
        self.log_dict({
            f'{stage}/total_loss': total_loss,
            f'{stage}/pos_loss': pos_loss,
            f'{stage}/kinematic_loss': kinematic_loss,
            f'{stage}/smooth_loss': smooth_loss,
            f'{stage}/dpo_loss': dpo_losses.mean().item(),
            f'{stage}/chosen_rewards_mean': chosen_rewards.mean().item(),
            f'{stage}/rejected_rewards_mean': rejected_rewards.mean().item(),
            f'{stage}/reward_accuracies_mean': reward_accuracies.mean().item(),
            f'{stage}/margins_mean': margins.mean().item(),
            f'{stage}/policy_rejected_logps_mean': policy_rejected_logps.mean().item(),
        }, prog_bar=True)

        # Periodic TensorBoard figure (requires a logger with .experiment,
        # i.e. TensorBoardLogger).
        if hasattr(self.logger, "experiment"):
            if stage == 'train' and hasattr(self, 'trainer') and self.trainer.global_step % 10 == 0:
                idx = 0  # visualize the first sample of the batch

                tgt_body = traj[idx].detach().cpu().numpy()  # [T, 2]
                pred_traj = policy_mean[idx].detach().cpu().numpy()  # [T, 2]
                pred_sigma = policy_sigma[idx].detach().cpu().numpy()  # [T, 2], std-devs

                fig, ax = plt.subplots(figsize=(10, 10))

                # Ego-frame axes.
                ax.arrow(0, 0, 3, 0, head_width=0.5, head_length=0.8,
                         fc='#2c3e50', ec='#2c3e50', linewidth=3, label='Vehicle Heading')
                ax.arrow(0, 0, 0, 3, head_width=0.5, head_length=0.8,
                         fc='#3498db', ec='#3498db', linewidth=3, linestyle='dotted', label='Lateral')

                # Expert (target) trajectory.
                ax.plot(tgt_body[:, 0], tgt_body[:, 1], '--', color='#27ae60',
                        linewidth=4, markersize=10, marker='s', markevery=2,
                        markerfacecolor='white', markeredgecolor='#27ae60',
                        label='Target Trajectory')

                # Predicted mean trajectory.
                ax.plot(pred_traj[:, 0], pred_traj[:, 1],
                        color='#e67e22', linewidth=3,
                        marker='o', markevery=3, markersize=6,
                        label='Predicted Mean Trajectory')

                # One-sigma uncertainty ellipse at every predicted step.
                for i in range(pred_traj.shape[0]):
                    x, y = pred_traj[i]
                    sigma_x, sigma_y = pred_sigma[i]

                    ellipse = patches.Ellipse(
                        (x, y),
                        width=2 * sigma_x,  # full width = 2*sigma_x
                        height=2 * sigma_y,  # full height = 2*sigma_y
                        edgecolor='#e67e22',
                        facecolor='#e67e22',
                        alpha=0.2
                    )
                    ax.add_patch(ellipse)

                # Landmark points.
                ax.scatter(0, 0, s=200, c='#e74c3c', marker='*',
                           edgecolor='black', zorder=10, label='Ego Position')
                ax.scatter(tgt_body[-1, 0], tgt_body[-1, 1], s=100,
                           c='#f1c40f', marker='X', edgecolor='black',
                           label='Final Target')

                # Styling.
                ax.set_title(f'{stage} Trajectories in Ego Frame (Step {self.trainer.global_step})', pad=20)
                ax.set_xlabel('Longitudinal Distance (m)', labelpad=15)
                ax.set_ylabel('Lateral Distance (m)', labelpad=15)
                ax.legend(loc='upper right', fontsize=9, framealpha=0.95)

                ax.grid(True, linestyle='--', alpha=0.7)
                ax.set_axisbelow(True)
                ax.set_aspect('equal', adjustable='datalim')

                # Axis limits with a small buffer around all plotted points.
                all_x = np.concatenate([tgt_body[:, 0], pred_traj[:, 0]])
                all_y = np.concatenate([tgt_body[:, 1], pred_traj[:, 1]])
                buffer = 0.2
                ax.set_xlim(min(all_x) - buffer, max(all_x) + buffer)
                ax.set_ylim(min(all_y) - buffer, max(all_y) + buffer)

                # Compass labels.
                ax.text(0.95, 0.95, 'N', transform=ax.transAxes,
                        ha='right', va='top', fontsize=14, color='#2c3e50')
                ax.text(0.95, 0.05, 'S', transform=ax.transAxes,
                        ha='right', va='bottom', fontsize=14, color='#2c3e50')

                plt.tight_layout()
                self.logger.experiment.add_figure(
                    f'{stage}_ego_frame_trajectory_with_variance',
                    fig,
                    global_step=self.trainer.global_step
                )
                plt.close(fig)

        return total_loss

    def training_step(self, batch: Tuple, batch_idx: int) -> torch.Tensor:
        return self._compute_loss(batch, 'train')

    # NOTE: no validation/test steps are configured; add validation_step /
    # test_step mirroring training_step if val/test splits are used.

    def configure_optimizers(self):
        """AdamW with cosine annealing stepped once per optimizer step.

        ``T_max`` is the total number of optimizer *steps*, so the scheduler
        must advance with interval="step"; with Lightning's default per-epoch
        stepping the cosine cycle would never complete.
        """
        optimizer = torch.optim.AdamW(
            self.policy_model.parameters(),
            lr=1e-4,
            weight_decay=0.01,
            betas=(0.9, 0.95)
        )

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=self.trainer.estimated_stepping_batches,
            eta_min=1e-6
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "interval": "step"},
        }


def load_ckpt(ckpt_path, model, device):
    """Load weights from a (Lightning) checkpoint file into ``model``.

    Accepts either a raw state dict or a Lightning checkpoint containing a
    ``state_dict`` entry. A leading ``model.`` prefix is stripped from every
    key so that weights saved from a wrapping LightningModule can be loaded
    into the bare model. Loading is non-strict, so missing or unexpected
    keys are tolerated.
    """
    checkpoint = torch.load(ckpt_path, map_location=device)
    raw_state = checkpoint.get("state_dict", checkpoint)
    prefix = "model."
    cleaned_state = OrderedDict(
        (key[len(prefix):] if key.startswith(prefix) else key, value)
        for key, value in raw_state.items()
    )
    model.load_state_dict(cleaned_state, strict=False)
    return model


if __name__ == '__main__':
    # Entry point: DPO fine-tuning of a pretrained ILformer trajectory model.
    config = get_my_global_config()
    model_config = config['model']

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Data module serving (history, future) trajectory windows from recorded
    # human driving steps; splits/normalization come from the trainer config.
    data_module = DPODataModule(
        data_dir="human_steps",
        batch_size=config['trainer']['batch_size'],
        num_episodes=config['trainer']['num_episodes'],
        future_step=config['base']['future_step'],
        history_step=config['base']['history_step'],
        pos_max_val=config['trainer']['pos_max_val'],
        vel_max_val=config['trainer']['vel_max_val'],
        test_split=config['trainer']['test_split'],
        val_split=config['trainer']['val_split']
    )
    # Policy model: the network being fine-tuned with DPO.
    policy_model = ILformerModel(
        hidden_dim=model_config['hidden_dim'],
        nhead=model_config['nhead'],
        num_layers=model_config['num_layers'],
        cfg=model_config,
        future_step=config['base']['future_step']
    )
    # Reference model: identical architecture, kept frozen as the DPO baseline.
    reference_model = ILformerModel(
        hidden_dim=model_config['hidden_dim'],
        nhead=model_config['nhead'],
        num_layers=model_config['num_layers'],
        cfg=model_config,
        future_step=config['base']['future_step']
    )

    # Both models start from the same supervised (imitation-learning) checkpoint.
    ckpt_path = './ckpt/epoch=35-step=5688.ckpt'
    policy_model = load_ckpt(ckpt_path, policy_model, device)
    reference_model = load_ckpt(ckpt_path, reference_model, device)

    dpo_lit_model = DPOLightningModel(
        policy_model=policy_model,
        reference_model=reference_model,
        future_step=config['base']['future_step'],
        history_step=config['base']['history_step'],
    )
    # Single-GPU full-precision training.
    trainer = pl.Trainer(
        max_epochs=config['trainer']['max_epochs'],
        accelerator='gpu',
        devices=[0],
        precision='32',
    )
    trainer.fit(dpo_lit_model, datamodule=data_module)
