import math
import torch.nn.functional as F
import torch
import torch.nn as nn


class TrajectoryDecoder(nn.Module):
    """Decode temporal features into a per-step Gaussian trajectory.

    A set of learned queries (one per future step) attends over the
    encoder memory via a Transformer decoder; an MLP head then predicts
    the mean and standard deviation of a 2-D position for every step.
    """

    def __init__(self, embed_dim=128, nhead=4, num_layers=3, hidden_dim=512, future_step=20):
        super().__init__()
        decoder_layer = nn.TransformerDecoderLayer(
            d_model=embed_dim,
            nhead=nhead,
            dim_feedforward=hidden_dim,
            batch_first=True
        )
        self.decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)

        # Trajectory head: 4 values per step (2 for mean, 2 for raw std).
        self.traj_head = nn.Sequential(
            nn.Linear(embed_dim, hidden_dim),
            nn.GELU(),
            nn.LayerNorm(hidden_dim),
            nn.Linear(hidden_dim, 4)
        )

        # BUGFIX: size the positional table to cover future_step. The
        # original fixed length of 50 made `tgt + pos_emb[:future_step]`
        # broadcast-fail for any future_step > 50.
        self.register_buffer("pos_emb", self._init_pos_emb(max(50, future_step), embed_dim))

        self.future_step = future_step

        # Learned decoder queries, one per predicted future step.
        self.query_embed = nn.Parameter(torch.randn(future_step, embed_dim))
        nn.init.orthogonal_(self.query_embed)

    def _init_pos_emb(self, max_len, d_model):
        """Build an L2-normalized sinusoidal position table [max_len, d_model]."""
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, d_model, 2).float() *
            (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Unit-norm rows keep the additive positional signal bounded.
        pe = F.normalize(pe, dim=1)
        return pe

    def forward(self, encoder_out):
        """Predict a trajectory distribution from encoder memory.

        :param encoder_out: [B, T, embed_dim] encoder features (memory)
        :return: (mu, sigma), each [B, future_step, 2]; sigma > 0
        """
        B, T, _ = encoder_out.shape

        tgt = self.query_embed.unsqueeze(0).expand(B, -1, -1)  # [B, future_step, embed_dim]
        tgt = tgt + self.pos_emb[:self.future_step]

        # Causal mask: each query may only attend to earlier queries.
        tgt_mask = nn.Transformer.generate_square_subsequent_mask(self.future_step).to(encoder_out.device)

        decoder_out = self.decoder(
            tgt=tgt,
            memory=encoder_out,
            tgt_mask=tgt_mask
        )  # [B, future_step, embed_dim]

        traj_params = self.traj_head(decoder_out)  # [B, future_step, 4]

        mu = traj_params[..., :2]         # mean (B, future_step, 2)
        sigma_raw = traj_params[..., 2:]  # raw std (B, future_step, 2)
        # Softplus (+ epsilon) guarantees a strictly positive std.
        sigma = F.softplus(sigma_raw) + 1e-6

        return mu, sigma


class PointEncoder(nn.Module):
    """Encode per-timestep scalar range observations into one feature vector.

    Side-line, lane, and lidar readings are embedded point-wise, tagged
    with a source embedding, mixed by a Transformer encoder, and
    compressed to ``output_dim`` per timestep.
    """

    def __init__(self, cfg, hidden_dim=4, num_heads=4, output_dim=128, num_layers=4):
        super(PointEncoder, self).__init__()
        self.cfg = cfg

        # Each reading is a scalar, hence input_dim=1 for every branch.
        self.side_encoder = nn.Sequential(
            nn.Linear(1, hidden_dim),
            nn.LayerNorm(hidden_dim),  # normalize first
            nn.GELU()                  # then activate
        )
        self.lane_encoder = nn.Sequential(
            nn.Linear(1, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.GELU()
        )
        self.lidar_encoder = nn.Sequential(
            nn.Linear(1, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.GELU()
        )

        # Source embedding: side line = 0, lane = 1, lidar = 2.
        self.source_embedding = nn.Embedding(3, hidden_dim)

        encoder_layer = nn.TransformerEncoderLayer(
            d_model=hidden_dim,
            nhead=num_heads,
            batch_first=True,
            dropout=0.1,
            activation='gelu'  # keep consistent with the point encoders
        )

        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        # Learned downsampling over the point axis.
        self.downsample = nn.Linear(
            cfg['obs_dims']['sider_state'] +
            cfg['obs_dims']['lane_state'] +
            cfg['obs_dims']['lidar_state'],
            cfg['point_encoder']['down_sample_dim']
        )
        # Project the flattened point features to the output dimension.
        self.final_fc = nn.Sequential(
            nn.Linear(hidden_dim * cfg['point_encoder']['down_sample_dim'],
                      output_dim),
            nn.GELU(),
            nn.LayerNorm(output_dim),
        )
        # Normalize after adding the source embedding.
        self.feat_norm = nn.LayerNorm(hidden_dim)

    def forward(self, side_data, lane_data, lidar_data):
        """
        :param side_data:  (B, T, cfg['obs_dims']['sider_state'])
        :param lane_data:  (B, T, cfg['obs_dims']['lane_state'])
        :param lidar_data: (B, T, cfg['obs_dims']['lidar_state'])
        :return: (B, T, output_dim)
        """
        B, T, _ = side_data.shape
        n_side = self.cfg['obs_dims']['sider_state']
        n_lane = self.cfg['obs_dims']['lane_state']
        n_lidar = self.cfg['obs_dims']['lidar_state']

        # BUGFIX: flatten using the configured dims rather than the
        # hard-coded 120/120/240 of the original, keeping this consistent
        # with the cfg-driven downsample layer and source ids below.
        # Reshape to (B*T, N, 1): one scalar feature per point.
        side_data = side_data.reshape(B * T, n_side).unsqueeze(-1)
        lane_data = lane_data.reshape(B * T, n_lane).unsqueeze(-1)
        lidar_data = lidar_data.reshape(B * T, n_lidar).unsqueeze(-1)

        # Encode each source separately.
        side_feats = self.side_encoder(side_data)    # (B*T, n_side, hidden_dim)
        lane_feats = self.lane_encoder(lane_data)    # (B*T, n_lane, hidden_dim)
        lidar_feats = self.lidar_encoder(lidar_data)  # (B*T, n_lidar, hidden_dim)

        # Concatenate along the point axis.
        feats = torch.cat([side_feats, lane_feats, lidar_feats], dim=1)  # (B*T, N_total, hidden_dim)

        # Source labels: 0 = side line, 1 = lane, 2 = lidar.
        side_source = torch.zeros(B * T, n_side, dtype=torch.long,
                                  device=side_data.device)
        lane_source = torch.ones(B * T, n_lane, dtype=torch.long,
                                 device=lane_data.device)
        lidar_source = torch.full((B * T, n_lidar), 2, dtype=torch.long,
                                  device=lidar_data.device)

        source_ids = torch.cat([side_source, lane_source, lidar_source], dim=1)  # (B*T, N_total)

        # Add the source embedding and normalize.
        source_embeds = self.source_embedding(source_ids)  # (B*T, N_total, hidden_dim)
        feats = feats + source_embeds
        feats = self.feat_norm(feats)

        # Downsample along the point axis via the learned linear layer.
        feats = feats.transpose(1, 2)   # (B*T, hidden_dim, N_total)
        feats = self.downsample(feats)  # (B*T, hidden_dim, down_sample_dim)
        feats = feats.transpose(1, 2)   # (B*T, down_sample_dim, hidden_dim)

        # NOTE(review): presumably a guard against mixed-precision inputs
        # before the transformer — confirm whether it is still needed.
        feats = feats.to(dtype=torch.float32)

        point_encoder = self.transformer_encoder(feats)  # (B*T, down_sample_dim, hidden_dim)

        # Flatten point features per timestep and project to output_dim.
        point_features = point_encoder.view(B, T, -1)
        point_features = self.final_fc(point_features)
        return point_features


class CrossAttentionLayer(nn.Module):
    """Cross-attention fusion: ego features query the point features."""

    def __init__(self, embed_dim, num_heads):
        super(CrossAttentionLayer, self).__init__()
        # Multi-head attention operating on [B, T, embed_dim] tensors.
        self.attn = nn.MultiheadAttention(embed_dim=embed_dim, num_heads=num_heads, batch_first=True)
        self.attn_norm = nn.LayerNorm(embed_dim)

    def forward(self, ego_feature, point_feature):
        """
        :param ego_feature: [B, T, D] ego-vehicle features (used as Query)
        :param point_feature: [B, T, D] surrounding point features (Key/Value)
        :return: fused features, [B, T, D]
        """
        fused, _ = self.attn(
            query=ego_feature,
            key=point_feature,
            value=point_feature,
        )
        return self.attn_norm(fused)


class EgoEncoder(nn.Module):
    """Fuse ego-vehicle state and other-vehicle state via self-attention.

    Each input stream is projected to ``hidden_dim``, concatenated, mixed
    by multi-head self-attention, then projected to ``embed_dim``.
    """

    def __init__(self, ego_dim, other_v_dim, hidden_dim, num_heads, embed_dim):
        super().__init__()
        self.fc_ego = nn.Sequential(
            nn.Linear(ego_dim, hidden_dim),
            nn.GELU(),
            nn.LayerNorm(hidden_dim))
        self.fc_other_v = nn.Sequential(
            nn.Linear(other_v_dim, hidden_dim),
            nn.GELU(),
            nn.LayerNorm(hidden_dim))

        # Initialize the first linear layer of each branch.
        # BUGFIX: the bias zeroing was indented outside the loop, so only
        # the last branch's bias was reset; zero both inside the loop.
        for layer in [self.fc_ego, self.fc_other_v]:
            nn.init.kaiming_normal_(layer[0].weight)
            nn.init.zeros_(layer[0].bias)

        self.attn = nn.MultiheadAttention(
            hidden_dim * 2, num_heads, batch_first=True)
        self.fc_out = nn.Sequential(
            nn.Linear(hidden_dim * 2, embed_dim),
            nn.GELU(),
            nn.LayerNorm(embed_dim))

    def forward(self, ego_data, other_v_data):
        """
        :param ego_data: [B, T, ego_dim]
        :param other_v_data: [B, T, other_v_dim]
        :return: [B, T, embed_dim]
        """
        ego_feat = self.fc_ego(ego_data)
        other_v_feat = self.fc_other_v(other_v_data)
        # Concatenate the two feature streams along the channel axis.
        combined_input = torch.cat([ego_feat, other_v_feat], dim=-1)  # [B, T, hidden_dim * 2]

        # Self-attention over the combined sequence.
        attn_output, _ = self.attn(combined_input, combined_input, combined_input)

        # Project to the shared embedding dimension.
        output = self.fc_out(attn_output)  # [B, T, embed_dim]
        return output


class TimeLayer(nn.Module):
    """Temporal Transformer encoder over per-timestep feature vectors."""

    def __init__(self, input_dim, hidden_dim, num_heads, num_layers, max_len=200):
        super(TimeLayer, self).__init__()

        # Sinusoidal positional encoding. Registered as a non-persistent
        # buffer so it follows the module across devices without entering
        # the state_dict (keeps checkpoints backward compatible).
        self.register_buffer(
            "positional_encoding",
            self.get_positional_encoding(max_len, input_dim),
            persistent=False,
        )

        # NOTE(review): hidden_dim is currently unused; kept for interface
        # compatibility. The encoder layer uses its default FFN width.
        self.encoder_layer = nn.TransformerEncoderLayer(
            d_model=input_dim,
            nhead=num_heads,
        )

        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)

    def get_positional_encoding(self, max_len, d_model):
        """Return a sinusoidal position table of shape [1, max_len, d_model]."""
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)

        # Numerically stable form of 1 / 10000^(2i / d_model).
        div_term = torch.exp(
            torch.arange(0, d_model, 2).float() *
            (-math.log(10000.0) / d_model))

        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        return pe.unsqueeze(0)  # [1, max_len, d_model]

    def forward(self, x, mask=None):
        """
        :param x: input, shape [B, T, D]
        :param mask: optional [B, T] bool tensor, True where the step is valid
        :return: temporally mixed output, shape [B, T, D]
        """
        B, T, D = x.shape

        # Add positional encoding (truncated to the actual sequence length).
        x = x + self.positional_encoding[:, :T, :].to(x.device)

        # The encoder layer was built without batch_first, so feed [T, B, D].
        x = x.transpose(0, 1)

        # BUGFIX: the original applied `~mask` unconditionally, raising a
        # TypeError whenever the documented default mask=None was used.
        key_padding_mask = None if mask is None else ~mask
        output = self.transformer_encoder(x, src_key_padding_mask=key_padding_mask)

        # Back to [B, T, D].
        output = output.transpose(0, 1)

        return output


class SpatioTemporalFusion(nn.Module):
    """Two stacked self-attention passes fused with the input.

    The first pass ("temporal") attends over the input sequence, the
    second ("spatial") re-attends over the first pass's output; all three
    are summed residually and layer-normalized.
    """

    def __init__(self, embed_dim, num_heads):
        super().__init__()
        self.temporal_attn = nn.MultiheadAttention(
            embed_dim, num_heads, batch_first=True
        )
        self.spatial_attn = nn.MultiheadAttention(
            embed_dim, num_heads, batch_first=True
        )
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, x):
        # First self-attention pass over the raw input.
        t_out = self.temporal_attn(x, x, x)[0]
        # Second pass, re-attending over the first pass's output.
        s_out = self.spatial_attn(t_out, t_out, t_out)[0]
        # Residual fusion of the input with both attention outputs.
        return self.norm(x + t_out + s_out)


class ILformerModel(nn.Module):
    """End-to-end trajectory prediction model.

    Pipeline: point-cloud observations and ego/other-vehicle states are
    encoded separately, summed and fused spatio-temporally, processed by
    a temporal Transformer, and decoded into a Gaussian trajectory
    (mu, sigma) over future steps.
    """

    def __init__(self,
                 cfg,
                 hidden_dim=64,
                 nhead=4,
                 num_layers=2,
                 future_step=20
                 ):
        super(ILformerModel, self).__init__()
        ego_dim = cfg['ego_dims']
        other_dim = cfg['other_dims']

        self.point_encoder = PointEncoder(
            hidden_dim=cfg['point_encoder']['hidden_dim'],
            num_heads=cfg['point_encoder']['num_heads'],
            num_layers=num_layers,
            output_dim=cfg['embed_dim'],
            cfg=cfg
        )

        # NOTE: velocity is intentionally excluded from the ego input,
        # matching the concatenation performed in forward().
        ego_input_dim = (
            ego_dim['ego_state']
            + ego_dim['position']
            + ego_dim['heading']
            + other_dim['navi_state']
        )
        self.ego_encoder = EgoEncoder(
            ego_dim=ego_input_dim,
            other_v_dim=other_dim['other_v_state'],
            hidden_dim=hidden_dim,
            num_heads=nhead,
            embed_dim=cfg['embed_dim'],
        )
        self.cross_attention_layer = CrossAttentionLayer(
            embed_dim=cfg['embed_dim'],
            num_heads=nhead
        )

        self.time_layer = TimeLayer(
            input_dim=cfg['embed_dim'],
            hidden_dim=hidden_dim,
            num_heads=nhead,
            num_layers=num_layers,
        )

        self.traj_decoder = TrajectoryDecoder(
            embed_dim=cfg['embed_dim'],
            nhead=cfg['nhead'],
            num_layers=cfg['num_layers'],
            future_step=future_step,
        )
        self.fusion = SpatioTemporalFusion(cfg['embed_dim'], nhead)

    def forward(self, obs_data, ego_data, other_data, masks):
        """
        :param obs_data: dict with 'sider_data', 'lane_data', 'lidar_data'
        :param ego_data: dict with 'ego_data', 'position', 'heading'
        :param other_data: dict with 'navi_data', 'other_v_data'
        :param masks: [B, T] bool validity mask for the temporal layer
        :return: (traj_mu, traj_sigma) from the trajectory decoder
        """
        # Encode point-cloud style observations.
        point_feats = self.point_encoder(
            obs_data['sider_data'],
            obs_data['lane_data'],
            obs_data['lidar_data']
        )

        # Encode ego state (velocity intentionally omitted — see __init__).
        ego_inputs = torch.cat([
            ego_data['ego_data'],
            ego_data['position'],
            ego_data['heading'],
            other_data['navi_data']
        ], dim=-1)
        ego_feats = self.ego_encoder(ego_inputs, other_data['other_v_data'])

        # Fuse the two streams, model time, then decode the trajectory.
        fused_feats = self.fusion(point_feats + ego_feats)
        time_feats = self.time_layer(fused_feats, masks)

        traj_mu, traj_sigma = self.traj_decoder(time_feats)
        return traj_mu, traj_sigma
