import torch
from math import sqrt
import torch.nn as nn
from torch import Tensor
from typing import Optional
from argparse import ArgumentParser


# Specialized residual module that fuses space-environment features with TEC data.
class SpecificResNet(nn.Module):
    """Adds a learned, weighted feed-forward correction on top of the raw TEC channel.

    Input:  (B, T, N) — inputs must be normalized beforehand; TEC is channel 0.
    Output: (B, T)
    """

    def __init__(self, num_features: int = 6, d_merge: int = 16) -> None:
        super().__init__()
        # Small MLP that collapses all N features into one correction value per step.
        layers = [
            nn.Linear(in_features=num_features, out_features=d_merge, bias=False),
            nn.SiLU(),
            nn.Dropout(0.1),
            nn.Linear(in_features=d_merge, out_features=1, bias=False),
        ]
        self.feedforward = nn.Sequential(*layers)
        # Learnable mixing weight for the correction term.
        self.w = nn.Parameter(torch.tensor([0.3]))

    def forward(self, x: Tensor) -> Tensor:
        """Return the TEC channel plus the weighted correction; (B, T, N) -> (B, T)."""
        tec = x[..., 0]
        correction = self.feedforward(x).squeeze(-1)
        return tec + self.w * correction


# Patch Reprogram module: patch-embeds a univariate series and reprograms the
# patches onto a reduced set of LLM text prototypes via cross-attention.
class PatchReprogram(nn.Module):
    
    def __init__(self,
                 configs: ArgumentParser,            # NOTE(review): accessed attribute-style (configs.d_model, ...); likely argparse.Namespace, not ArgumentParser — confirm with caller
                 llm_dim: int,                       # LLM hidden dimension
                 vocab_size: int):                   # vocabulary size (rows of the word-embedding table)
        super(PatchReprogram, self).__init__()
        # Patch Embedder: maps each patch of the series to a d_model vector
        self.patch_embedder = PatchEmbedder(
            d_model=configs.d_model,
            patch_len=configs.patch_len,
            stride=configs.patch_stride,
            dropout=configs.dropout
        )
        # Condenses the full word-embedding table (vocab_size rows) into a
        # small set of text prototypes (text_prototype_num rows)
        self.mapping_layer = nn.Linear(
            in_features=vocab_size,
            out_features=configs.text_prototype_num
        )
        # Reprogramming layer (cross-attention: patches attend to prototypes)
        self.reprogramming_layer = ReprogrammingLayer(
            d_llm=llm_dim,
            d_model=configs.d_model,
            n_heads=configs.n_heads,
            d_keys=configs.d_ff
        )
    
    # Input shapes:  x is (B, T); word_embeddings is (V, d_llm)
    # Output shape:  (B, P, d_llm)
    def forward(self, x: Tensor, word_embeddings: Tensor) -> Tensor:
        """Patch-embed the 1-D series, then reprogram the patches onto text prototypes."""
        # Split the series into patches and embed each: (B, T) -> (B, P, d_model)
        x = self.patch_embedder(x)
        # Build text prototypes: (V, d_llm) -> (text_prototype_num, d_llm)
        text_prototypes = self.mapping_layer(word_embeddings.transpose(0, 1)).transpose(0, 1).contiguous()
        # Prototypes serve as both keys and values of the cross-attention
        return self.reprogramming_layer(x, text_prototypes, text_prototypes)


# Patch Embedder: splits a 1-D series into (possibly overlapping) patches and
# embeds each patch with a circular Conv1d.
class PatchEmbedder(nn.Module):
    """Unfold a (B, T) series into patches and project each patch to d_model.

    Input:  (B, T)
    Output: (B, P, d_model) with P = (T - patch_len) // stride + 1
    """

    def __init__(self, 
                 d_model: int = 32,           # embedding size of each patch
                 patch_len: int = 16,         # length of each patch
                 stride: int = 8,             # step between consecutive patches
                 dropout: float = 0.1):
        super(PatchEmbedder, self).__init__()
        # Patching parameters
        self.patch_len = patch_len
        self.stride = stride
        # kernel_size=3 with circular padding needs padding=1 to keep the patch
        # count unchanged; very old torch (<1.5) required padding=2 instead.
        # BUG FIX: the previous lexicographic string compare
        # (torch.__version__ >= '1.5.0') evaluated '1.13.0' < '1.5.0' and
        # mis-selected padding=2 on torch 1.10-1.13 — compare numerically.
        try:
            major, minor = (int(p) for p in torch.__version__.split('+')[0].split('.')[:2])
            padding = 1 if (major, minor) >= (1, 5) else 2
        except ValueError:
            padding = 1  # unparseable dev-version string: assume a modern torch
        # Maps each patch_len-channel column to d_model channels
        self.tokenConv = nn.Conv1d(in_channels=patch_len,
                                   out_channels=d_model,
                                   kernel_size=3,
                                   padding=padding,
                                   padding_mode='circular',
                                   bias=False)
        # Kaiming initialization for the 1-D conv kernel
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_in', nonlinearity='leaky_relu')
        # Residual dropout
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: Tensor) -> Tensor:
        """Patch and embed the series; (B, T) -> (B, P, d_model)."""
        # (B, T) -> (B, P, patch_len)
        x = x.unfold(dimension=-1, size=self.patch_len, step=self.stride)
        # Convolve over the patch_len axis: (B, patch_len, P) -> (B, P, d_model)
        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
        return self.dropout(x)


# Reprogramming layer: multi-head cross-attention where patch embeddings are
# the queries and text prototypes are the keys and values.
class ReprogrammingLayer(nn.Module):
    """Project patch embeddings into the LLM space via attention over prototypes.

    Input:  target (B, P, d_model); source/value (S, d_llm), shared across batch.
    Output: (B, P, d_llm)
    """

    def __init__(self,
                 d_model: int,                              # patch embedding size
                 n_heads: int,                              # number of attention heads
                 d_llm: int,                                # LLM hidden size
                 d_keys: Optional[int] = None,              # per-head key/value dim
                 attention_dropout: float = 0.1):
        super(ReprogrammingLayer, self).__init__()
        # Default per-head dimension (also used when d_keys is 0/None)
        d_keys = d_keys or (d_model // n_heads)
        # Q K V O projection matrices
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        self.key_projection = nn.Linear(d_llm, d_keys * n_heads)
        self.value_projection = nn.Linear(d_llm, d_keys * n_heads)
        self.out_projection = nn.Linear(d_keys * n_heads, d_llm)
        self.n_heads = n_heads
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, target_embedding: Tensor, source_embedding: Tensor, value_embedding: Tensor) -> Tensor:
        """Cross-attend targets onto sources; (B, P, d_model) x (S, d_llm) -> (B, P, d_llm)."""
        B, P, _ = target_embedding.shape
        S, _ = source_embedding.shape
        H = self.n_heads
        # Project and split into heads
        target_embedding = self.query_projection(target_embedding).view(B, P, H, -1)
        source_embedding = self.key_projection(source_embedding).view(S, H, -1)
        value_embedding = self.value_projection(value_embedding).view(S, H, -1)
        # Scaled dot-product attention; sources carry no batch dim (shared prototypes)
        scale = 1. / sqrt(target_embedding.shape[-1])
        scores = torch.einsum("blhe,she->bhls", target_embedding, source_embedding)
        A = self.dropout(torch.softmax(scale * scores, dim=-1))
        out = torch.einsum("bhls,she->blhe", A, value_embedding).reshape(B, P, -1)
        # Merge heads and project back to the LLM dimension
        return self.out_projection(out)


# Output projection module: maps the last patch_num LLM states to the forecast.
class OutputProjection(nn.Module):
    """Reduce, flatten, and project LLM outputs to the prediction horizon.

    Input:  (B, O, d_llm) — only the trailing patch_num positions are used.
    Output: (B, pred_len)
    """

    def __init__(self,
                 patch_num: int,            # number of trailing patches to keep
                 d_llm: int,                # LLM hidden dimension
                 d_ff: int = 64,            # reduced dimension after down-projection
                 pred_len: int = 48,        # forecast horizon
                 dropout: float = 0.1):
        super().__init__()
        # How many trailing positions to slice off the sequence axis.
        self.patch_num = patch_num
        # Down-projection from d_llm to d_ff.
        self.down_dllm = nn.Linear(in_features=d_llm, out_features=d_ff)
        # Flattens the (patch, feature) pair of axes into one.
        self.flatten = nn.Flatten(start_dim=-2)
        # Final projection to the forecast horizon.
        self.pred_projection = nn.Linear(in_features=patch_num * d_ff, out_features=pred_len)
        # Dropout applied to the final output.
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: Tensor) -> Tensor:
        """(B, O, d_llm) -> (B, pred_len), using only the last patch_num positions."""
        recent = x[:, -self.patch_num:, :]
        reduced = self.down_dllm(recent)            # (B, patch_num, d_ff)
        flat = self.flatten(reduced)                # (B, patch_num * d_ff)
        return self.dropout(self.pred_projection(flat))