import torch
from torch import nn
import numpy as np
from model.model_blocks import EmbedPosEnc, AttentionBlocks, Time_att, FFN, Bottlenecks
from einops import repeat

# Module-level compute device: prefer CUDA when available, otherwise fall back to CPU.
# Used below to place the sigma parameters at construction time.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Build a transformer model: the input is the first 23 feature columns and the
# target is the last column.
# The input features contain many NaNs; attention is used to ignore NaN positions
# and attend only to features that are not NaN.
# An all_mask_pack.npy file is provided; all_mask_pack is derived purely via
# np.isnan and carries no additional mask information.


class S4Max(nn.Module):
    """Dual-stream transformer with a shared cross-attention token.

    Two input streams (``ts`` and ``vel``) are embedded separately, run through
    per-stream self-attention, and exchange information each layer through a
    single learnable token that is cross-attended with the leading token of
    both streams. ``forward`` returns an endpoint regression from the two
    stream tokens.

    NOTE(review): ``self.bottlenecks``, ``self.time_att``, ``self.relu`` and
    ``self.last`` (the crossing-behavior classification head) are constructed
    but never reached in ``forward`` — the original code placed that path after
    an unconditional ``return``, making it dead code. The modules are kept so
    existing checkpoints still load; re-enable the head deliberately if it is
    needed (see the commented-out multi-task return in the original, which
    also used ``sigma_cls``/``sigma_reg``).
    """

    def __init__(self, args):
        """Build all submodules from the ``args`` config.

        Expected ``args`` attributes (per usage below — confirm against caller):
        d_model, dff, modal_nums, num_layers, num_heads, ts_input, vel_input,
        num_bnks.
        """
        super(S4Max, self).__init__()
        # Learnable per-task uncertainty weights (classification / regression),
        # currently unused in forward (see class docstring). nn.Parameter
        # already defaults to requires_grad=True.
        self.sigma_cls = nn.Parameter(torch.ones(1, 1, device=device))
        nn.init.kaiming_normal_(self.sigma_cls, mode='fan_out')
        self.sigma_reg = nn.Parameter(torch.ones(1, 1, device=device))
        nn.init.kaiming_normal_(self.sigma_reg, mode='fan_out')

        d_model = args.d_model
        hidden_dim = args.dff
        modal_nums = args.modal_nums  # NOTE(review): read but unused — confirm intent
        self.num_layers = args.num_layers
        # Shared cross-stream fusion token, broadcast over the batch in forward.
        self.token = nn.Parameter(torch.ones(1, 1, d_model))

        # Per-stream embedding (projection + positional encoding) and stream tokens.
        self.ts_embedding = EmbedPosEnc(args.ts_input, d_model)
        self.ts_token = nn.Parameter(torch.ones(1, 1, d_model))

        self.vel_embedding = EmbedPosEnc(args.vel_input, d_model)
        self.vel_token = nn.Parameter(torch.ones(1, 1, d_model))

        # One self-attention + FFN pair per stream per layer, plus a
        # cross-attention + FFN pair for the fusion token.
        self.ts_att = nn.ModuleList()
        self.ts_ffn = nn.ModuleList()
        self.vel_att = nn.ModuleList()
        self.vel_ffn = nn.ModuleList()
        self.cross_att = nn.ModuleList()
        self.cross_ffn = nn.ModuleList()

        for _ in range(self.num_layers):
            self.ts_att.append(AttentionBlocks(d_model, args.num_heads))
            self.ts_ffn.append(FFN(d_model, hidden_dim))
            self.vel_att.append(AttentionBlocks(d_model, args.num_heads))
            self.vel_ffn.append(FFN(d_model, hidden_dim))
            self.cross_att.append(AttentionBlocks(d_model, args.num_heads))
            self.cross_ffn.append(FFN(d_model, hidden_dim))

        # Crossing-behavior head — currently unreachable in forward (see docstring),
        # kept for checkpoint compatibility.
        self.bottlenecks = Bottlenecks(d_model, args)
        self.time_att = Time_att(dims=args.num_bnks)
        self.relu = nn.ReLU()
        self.last = nn.Linear(args.num_bnks, 1)

        # Endpoint regression head over the concatenated ts/vel stream tokens.
        self.endp = nn.Sequential(nn.Linear(2 * d_model, 128),
                                  nn.ReLU(),
                                  nn.Linear(128, 1),
                                  nn.ReLU())

    def forward(self, ts, vel):
        """Return the endpoint prediction, shape [batch, 1].

        Args:
            ts:  time-series stream, e.g. [64, 16, 7] — TODO confirm shapes.
            vel: velocity stream, e.g. [64, 16, 1].
        """
        b = ts.shape[0]
        # Broadcast the learnable fusion token across the batch.
        token = repeat(self.token, '() s e -> b s e', b=b)

        ts = self.ts_embedding(ts, self.ts_token)
        vel = self.vel_embedding(vel, self.vel_token)

        # The original unrolled layer 0 and looped over layers 1..num_layers-1;
        # the two code paths were identical, so a single loop replaces both.
        for i in range(self.num_layers):
            ts = self.ts_att[i](ts)
            token = torch.cat([token, ts[:, 0:1, :]], dim=1)   # append ts stream token
            vel = self.vel_att[i](vel)
            token = torch.cat([token, vel[:, 0:1, :]], dim=1)  # append vel stream token
            token = self.cross_att[i](token)                   # fuse across streams
            token_new = token[:, 0:1, :]
            # Write the fused token back as each stream's leading token.
            ts = torch.cat([token_new, ts[:, 1:, :]], dim=1)
            vel = torch.cat([token_new, vel[:, 1:, :]], dim=1)
            ts = self.ts_ffn[i](ts)
            vel = self.vel_ffn[i](vel)
            token = self.cross_ffn[i](token)[:, 0:1, :]

        # Endpoint regression from the two stream tokens.
        cls_out = torch.cat([ts[:, 0:1, :], vel[:, 0:1, :]], dim=1)
        cls_out_flatten = torch.flatten(cls_out, start_dim=1)
        end_point = self.endp(cls_out_flatten)
        return end_point
