from collections import OrderedDict

import torch
from torch import nn

from model.transformer_torch import Transformer

class Transfollower(nn.Module):
    """Transformer-based car-following model.

    Encoder/decoder inputs are linearly embedded to ``d_model``, given learned
    positional embeddings, and passed through the project's Transformer, which
    also returns its attention maps.
    """

    def __init__(self, config, enc_in = 61, dec_in = 61, d_model = 256, num_encoder_layers = 4, num_decoder_layers = 4):
        super(Transfollower, self).__init__()
        # Project-local Transformer variant; unlike torch.nn.Transformer it
        # returns attention weights alongside the decoded sequence.
        self.transformer = Transformer(
            d_model=d_model,
            nhead=8,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=2048,
            dropout=0.1,
            activation='relu',
            custom_encoder=None,
            custom_decoder=None,
            layer_norm_eps=1e-05,
            batch_first=True,
            device=None,
            dtype=None,
        )
        self.enc_emb = nn.Linear(enc_in, d_model)         # lift encoder features to d_model
        self.dec_emb = nn.Linear(dec_in, d_model)         # lift decoder features to d_model
        self.out_proj = nn.Linear(d_model, 1, bias=True)  # map back to one output channel
        self.settings = config

        # Learned positional embeddings: one d_model vector per time step.
        self.enc_positional_embedding = nn.Embedding(self.settings.SEQ_LEN, d_model)
        self.dec_positional_embedding = nn.Embedding(self.settings.PRED_LEN + self.settings.LABEL_LEN, d_model)

        # Initialise all learnable projections/embeddings with N(0, 0.02),
        # in the same order as before so the RNG stream is unchanged.
        for module in (self.enc_emb, self.dec_emb, self.out_proj,
                       self.enc_positional_embedding, self.dec_positional_embedding):
            nn.init.normal_(module.weight, 0, .02)

    def forward(self, enc_inp, dec_inp):
        # Positional indices 0..T-1 on the inputs' device.
        enc_pos = torch.arange(enc_inp.shape[1], dtype=torch.long, device=enc_inp.device)
        dec_pos = torch.arange(dec_inp.shape[1], dtype=torch.long, device=dec_inp.device)

        # Embed and add positional vectors, broadcast over the batch:
        # (B, T, d_model) + (1, T, d_model).
        enc_inp = self.enc_emb(enc_inp) + self.enc_positional_embedding(enc_pos).unsqueeze(0)
        dec_inp = self.dec_emb(dec_inp) + self.dec_positional_embedding(dec_pos).unsqueeze(0)

        out, enc_attns, dec_attns, enc_dec_attns = self.transformer(enc_inp, dec_inp)
        out = self.out_proj(out)
        # Keep only the prediction horizon at the tail of the decoded sequence.
        return out[:, -self.settings.PRED_LEN:, :], enc_attns, dec_attns, enc_dec_attns

# Speed ceiling (arbitrary units) used to squash model outputs into [0, MAX_SPD].
MAX_SPD = 25

class lstm_model(nn.Module):
    """Seq2seq LSTM baseline.

    The encoder consumes the history window; its final (hidden, cell) state
    seeds the decoder, which emits one value per target step.
    """

    def __init__(self, config, input_size = 61, hidden_size = 256, lstm_layers = 1, dropout = 0):
        super(lstm_model, self).__init__()
        self.encoder = nn.LSTM(input_size, hidden_size, lstm_layers, batch_first=True, dropout=dropout)
        self.decoder = nn.LSTM(input_size, hidden_size, lstm_layers, batch_first=True, dropout=dropout)
        self.linear = nn.Linear(hidden_size, 1)

        nn.init.normal_(self.linear.weight, 0, .02)
        nn.init.constant_(self.linear.bias, 0.0)
        self.settings = config

    def forward(self, src, tgt):
        # Encoder is run only for its final (hidden, cell) state.
        _, state = self.encoder(src)
        dec_out, _ = self.decoder(tgt, state)

        pred = self.linear(dec_out)
        # Rescale tanh's [-1, 1] onto the physical speed range [0, MAX_SPD].
        pred = torch.tanh(pred) * MAX_SPD / 2 + MAX_SPD / 2
        return pred[:, -self.settings.PRED_LEN:, :]

# fully connected neural network
class nn_model(nn.Module):
    """Per-timestep MLP baseline; output squashed into [0, MAX_SPD].

    Fix: the ``dropout`` constructor argument was accepted but never used; it
    is now applied after each hidden ReLU (a no-op in ``eval()`` mode and when
    ``dropout == 0``). Submodules keep their original numeric names
    ('0'..'4') via OrderedDict so existing state_dicts still load
    (nn.Dropout holds no parameters).
    """
    def __init__(self, config, input_size = 61, hidden_size = 256, dropout = 0.1):
        super(nn_model, self).__init__()
        self.encoder = nn.Sequential(OrderedDict([
            ('0', nn.Linear(input_size, hidden_size)),
            ('1', nn.ReLU()),
            ('drop1', nn.Dropout(dropout)),
            ('2', nn.Linear(hidden_size, hidden_size)),
            ('3', nn.ReLU()),
            ('drop2', nn.Dropout(dropout)),
            ('4', nn.Linear(hidden_size, 1)),
        ]))
        self.settings = config

    # using sv speed and lv speed as input. Use 0 as placeholders for future sv speed.
    def forward(self, src):
        out = self.encoder(src)
        # Map tanh's [-1, 1] onto the physical speed range [0, MAX_SPD].
        out = torch.tanh(out)*MAX_SPD/2 + MAX_SPD/2
        return out[:,-self.settings.PRED_LEN:,:]

class Trajectron(nn.Module):
    """Trajectron-style recurrent predictor over [T, B, N, d] node tensors.

    Two LSTMs encode the subject vehicle's history (node index -1) and the
    leading vehicle's history (node index 0); their final hidden states are
    concatenated (32 + 8 = 40) and mapped to a 2-D output per frame.
    """

    def __init__(self, config, input_dim = 2) -> None:
        super(Trajectron, self).__init__()
        self.his_encoder = nn.LSTM(input_dim, 32)          # subject-vehicle history encoder
        self.interaction_encoder = nn.LSTM(input_dim, 8)   # neighbor (leading vehicle) encoder
        self.output_layer = nn.Linear(40, 2)               # 32 + 8 -> 2
        self.settings = config

    def forward(self, inputs, iftest = False):
        """
        inputs: [T, B, N, d]
        """
        T, B, _, _ = inputs.shape
        # Fix: allocate on the input's device instead of hard-coding .cuda(),
        # so the model also runs on CPU (and on the right GPU in multi-GPU).
        outputs = torch.zeros(T, B, 2, device=inputs.device)
        obs_length = self.settings.SEQ_LEN

        for framenum in range(T):
            nodes_current = inputs[:framenum + 1]

            if framenum >= obs_length and iftest:
                # Replace ground truth data of SV with prediction part.
                # NOTE(review): nodes_current is a view of `inputs`, so this
                # writes back into the caller's tensor — confirm intended.
                sv_pre = outputs[obs_length - 1:framenum]
                nodes_current[obs_length:, :, -1, :] = sv_pre

            # Only takes the most recent obs_length steps
            if len(nodes_current) > obs_length:
                nodes_current = nodes_current[-obs_length:]

            # encoding sv history
            _, (his_embedding, _) = self.his_encoder(nodes_current[:, :, -1, :])
            his_embedding = his_embedding[-1] # take the hidden state of the last lstm layer

            # encode interaction (here we only have the leading vehicle as the neighbor)
            _, (inter_embedding, _) = self.interaction_encoder(nodes_current[:, :, 0, :])
            inter_embedding = inter_embedding[-1] # take the last layer

            # concat the two embeddings (fix: use dim=, torch's native kwarg)
            comb_embedding = torch.cat([his_embedding, inter_embedding], dim = -1)

            outputs_current = self.output_layer(comb_embedding) # B, d
            outputs[framenum] = outputs_current

        return outputs
import torch
import torch.nn as nn
import torch.nn.functional as F
import math

# -------- Sinusoidal Positional Encoding (batch_first) --------
class SinePosEnc(nn.Module):
    """Adds fixed sine/cosine positional encodings to a (B, T, D) tensor."""

    def __init__(self, d_model, max_len=4096):
        super().__init__()
        positions = torch.arange(max_len, dtype=torch.float32).unsqueeze(1)  # (max_len, 1)
        freqs = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float32) * (-math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)  # even channels: sine
        table[:, 1::2] = torch.cos(positions * freqs)  # odd channels: cosine
        # (1, max_len, d_model); non-persistent — recomputable, so kept out of state_dict.
        self.register_buffer('pe', table.unsqueeze(0), persistent=False)

    def forward(self, x):
        """x: (B, T, D) -> same shape with positional encoding added."""
        return x + self.pe[:, :x.size(1), :]

# -------- Depthwise Separable Conv1d block --------
class DwConv1d(nn.Module):
    """Depthwise + pointwise 1-D convolution over the time axis of (B, T, D)."""

    def __init__(self, dim, kernel=9, dropout=0.1):
        super().__init__()
        same_pad = (kernel - 1) // 2  # 'same'-length padding for odd kernels
        self.dw = nn.Conv1d(dim, dim, kernel, padding=same_pad, groups=dim)
        self.pw = nn.Conv1d(dim, dim, kernel_size=1)
        self.act = nn.SiLU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """x: (B, T, D) -> (B, T, D)."""
        h = x.transpose(1, 2)            # channels-first for Conv1d: (B, D, T)
        h = self.pw(self.dw(h))
        h = self.act(h).transpose(1, 2)  # back to (B, T, D)
        return self.dropout(h)

# -------- Conformer Block (FFN/2 - Conv - MHSA - FFN/2) --------
class ConformerBlock(nn.Module):
    """Conformer-style block: half-weighted FFN, depthwise-conv branch,
    self-attention, then another half-weighted FFN — each residual."""

    def __init__(self, dim, nhead=6, ffn_mult=4, attn_dropout=0.1, dropout=0.1):
        super().__init__()

        def feed_forward():
            # Pre-norm FFN with SiLU and dropout, expansion factor ffn_mult.
            return nn.Sequential(
                nn.LayerNorm(dim),
                nn.Linear(dim, dim * ffn_mult),
                nn.SiLU(),
                nn.Dropout(dropout),
                nn.Linear(dim * ffn_mult, dim),
                nn.Dropout(dropout),
            )

        # Submodules created in the original order (keeps init RNG stream identical).
        self.ffn1 = feed_forward()
        self.conv = nn.Sequential(
            nn.LayerNorm(dim),
            DwConv1d(dim, kernel=9, dropout=dropout),
        )
        self.attn_norm = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, nhead, dropout=attn_dropout, batch_first=True)
        self.ffn2 = feed_forward()

    def forward(self, x, attn_mask=None):
        """x: (B, T, D); attn_mask: optional (T, T) bool mask for the MHSA step."""
        x = x + 0.5 * self.ffn1(x)  # first half-FFN (Macaron style)
        x = x + self.conv(x)        # depthwise-conv branch
        normed = self.attn_norm(x)
        attn_out, _ = self.attn(normed, normed, normed, attn_mask=attn_mask, need_weights=False)
        x = x + attn_out
        x = x + 0.5 * self.ffn2(x)  # second half-FFN
        return x

def causal_mask(T):
    """Boolean (T, T) mask, True strictly above the diagonal — the future
    positions nn.MultiheadAttention should block."""
    full = torch.ones(T, T, dtype=torch.bool)
    return full.triu(diagonal=1)

class conformer_xattn_model(nn.Module):
    """Conformer encoder over the source sequence plus one cross-attention
    decoding step that uses the (placeholder-filled) target as queries."""

    def __init__(self, config, input_size=61, d_model=192, nhead=6, num_layers=4, dropout=0.1, ffn_mult=4):
        super().__init__()
        self.settings = config
        self.in_proj = nn.Linear(input_size, d_model)
        self.pos = SinePosEnc(d_model)
        self.enc_layers = nn.ModuleList([
            ConformerBlock(d_model, nhead=nhead, ffn_mult=ffn_mult, attn_dropout=dropout, dropout=dropout)
            for _ in range(num_layers)
        ])
        # Decoder side: the target sequence serves as the query
        # (its future steps may be placeholder features).
        self.tgt_proj = nn.Linear(input_size, d_model)
        self.tgt_pos = SinePosEnc(d_model)
        self.cross_norm = nn.LayerNorm(d_model)
        self.cross_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
        self.head = nn.Sequential(
            nn.LayerNorm(d_model),
            nn.Linear(d_model, 1)
        )
        nn.init.normal_(self.head[1].weight, 0, .02)
        nn.init.constant_(self.head[1].bias, 0.0)

    def forward(self, src, tgt):
        # Encoder under a causal mask so no step attends to its future.
        enc = self.pos(self.in_proj(src))           # (B, T_in, D)
        mask = causal_mask(enc.size(1)).to(enc.device)
        for layer in self.enc_layers:
            enc = layer(enc, attn_mask=mask)

        # Decoder: all output steps produced at once from the target queries.
        queries = self.tgt_pos(self.tgt_proj(tgt))  # (B, T_out, D)
        queries = self.cross_norm(queries)
        # Cross-attention: Q = queries, K = V = encoder outputs.
        ctx, _ = self.cross_attn(queries, enc, enc, need_weights=False)  # (B, T_out, D)

        pred = self.head(ctx)                       # (B, T_out, 1)
        # Rescale tanh's [-1, 1] onto the physical speed range [0, MAX_SPD].
        pred = torch.tanh(pred) * MAX_SPD / 2 + MAX_SPD / 2
        return pred[:, -self.settings.PRED_LEN:, :]
class InterpHead(nn.Module):
    """Upsamples a low-resolution forecast (B, h_low) to horizon H via linear
    interpolation; a no-op when the input already has length H."""

    def __init__(self, H):
        super().__init__()
        self.H = H  # target horizon length

    def forward(self, f_low):  # (B, h_low) -> (B, H)
        B, h_low = f_low.shape
        if h_low == self.H:
            return f_low
        # Fix: removed two dead `torch.linspace` index tensors that were
        # computed but never used — F.interpolate does the resampling itself.
        f = F.interpolate(f_low.unsqueeze(1), size=self.H, mode='linear', align_corners=False)
        return f.squeeze(1)

class NHiTSBlock(nn.Module):
    """Single-resolution N-HiTS block producing a backcast (the part of the
    input this block "explains") and a forecast of length h_out.

    conv=True keeps temporal structure with depthwise+pointwise Conv1d;
    conv=False flattens the input and uses an MLP.
    """

    def __init__(self, d_in, t_in, h_out, hidden=256, conv=True):
        super().__init__()
        self.t_in = t_in    # input length at this resolution
        self.h_out = h_out  # forecast length at this resolution
        if conv:
            # Preserve temporal structure: depthwise + pointwise convs.
            self.feat = nn.Sequential(
                nn.Conv1d(d_in, d_in, kernel_size=3, padding=1, groups=d_in),
                nn.Conv1d(d_in, hidden, kernel_size=1),
                nn.SiLU(),
                nn.Dropout(0.1),
                nn.Conv1d(hidden, hidden, kernel_size=3, padding=1, groups=1),
                nn.SiLU(),
                nn.Dropout(0.1),
            )
            self.backcast = nn.Sequential(
                nn.Conv1d(hidden, d_in, kernel_size=1),
            )
            self.forecast = nn.Sequential(
                nn.AdaptiveAvgPool1d(1),      # pool over time to a global vector
                nn.Flatten(),                 # (B, hidden)
                nn.LayerNorm(hidden),
                nn.Linear(hidden, h_out),
            )
        else:
            # MLP variant: flatten (d_in, t_in) into one vector.
            self.feat = nn.Sequential(
                nn.Flatten(),
                nn.Linear(d_in * t_in, hidden),
                nn.SiLU(),
                nn.Dropout(0.1),
                nn.Linear(hidden, hidden),
                nn.SiLU(),
                nn.Dropout(0.1),
            )
            self.backcast = nn.Linear(hidden, d_in * t_in)
            self.forecast = nn.Linear(hidden, h_out)

    def forward(self, x):  # x: (B, d_in, t_in) for both variants
        # Fix: the original guarded `self.feat(x)` behind an isinstance check
        # whose two branches were identical — a single call is equivalent.
        h = self.feat(x)
        if isinstance(self.backcast, nn.Linear):
            # MLP variant: reshape the flat backcast to (B, d_in, t_in).
            b = self.backcast(h).view(x.size(0), x.size(1), self.t_in)
            f = self.forecast(h)                                        # (B, h_out)
        else:
            b = self.backcast(h)                                        # (B, d_in, t_in)
            f = self.forecast(h)                                        # (B, h_out)
        return b, f

class nhits_model(nn.Module):
    """N-HiTS-style forecaster with the same interface as nn_model:
    forward(src) -> (B, PRED_LEN, 1).

    Three resolutions (x1 / x2 / x4); each hosts `stacks_per_scale` blocks
    whose backcasts are subtracted residually while their forecasts are
    upsampled to the full horizon H and summed.

    Fix: the forward loop previously hard-coded `range(2)` and silently broke
    for any `stacks_per_scale != 2`; it now honours the constructor argument.
    """
    def __init__(self, config, input_size=61, hidden=256, stacks_per_scale=2, use_conv=True):
        super().__init__()
        self.settings = config
        self.H = config.PRED_LEN
        self.D = 64  # projection width (capacity control)
        self.in_proj = nn.Linear(input_size, self.D)
        self.stacks_per_scale = stacks_per_scale

        # Three resolutions: 1x / 2x / 4x downsampling of the input.
        self.scales = [1, 2, 4]
        self.blocks = nn.ModuleList()
        self.up_heads = nn.ModuleList()

        for s in self.scales:
            t_in_s = math.ceil(config.INPUT_LEN / s)
            h_out_s = max(1, math.ceil(self.H / s))
            for _ in range(stacks_per_scale):
                self.blocks.append(NHiTSBlock(d_in=self.D, t_in=t_in_s, h_out=h_out_s, hidden=hidden, conv=use_conv))
                self.up_heads.append(InterpHead(self.H))

        self.out_head = nn.Linear(self.H, self.H)  # light remap for stability
        nn.init.eye_(self.out_head.weight)         # start as identity
        nn.init.constant_(self.out_head.bias, 0.0)
        self.final_proj = nn.Linear(1, 1)  # placeholder for future extensions

    def _downsample(self, x, s):
        """Strided index-select along time: (B, T, D) -> (B, ceil(T/s), D)."""
        if s == 1: return x
        T = x.size(1)
        T_s = math.ceil(T / s)
        idx = torch.linspace(0, T - 1, steps=T_s, device=x.device).long()
        return x.index_select(dim=1, index=idx)

    def forward(self, src):
        x = self.in_proj(src)               # (B, T, D)
        B, T, D = x.shape
        forecast_sum = torch.zeros(B, self.H, device=x.device)
        ptr = 0
        for s in self.scales:
            x_s = self._downsample(x, s)    # (B, T_s, D)
            xs = x_s.transpose(1, 2)        # (B, D, T_s) for the conv blocks
            for _ in range(self.stacks_per_scale):  # fix: was hard-coded range(2)
                b, f_low = self.blocks[ptr](xs)     # b:(B,D,T_s), f_low:(B,h_low)
                ptr += 1
                # Residual backcast: remove what this block already explained.
                xs = xs - b
                # Upsample the low-resolution forecast to H and accumulate.
                forecast_sum = forecast_sum + self.up_heads[ptr - 1](f_low)

        forecast_sum = self.out_head(forecast_sum)  # (B, H)
        out = forecast_sum.unsqueeze(-1)            # (B, H, 1)
        # Squash into the physical speed range [0, MAX_SPD].
        out = torch.tanh(out) * MAX_SPD / 2 + MAX_SPD / 2
        return out[:, -self.settings.PRED_LEN:, :]
