import torch
from torch import nn
import torch.nn.functional as F
import math

# Module-level default hyper-parameters.
# NOTE(review): every class below receives d_model / n_head as explicit
# constructor arguments and never reads these globals; they appear to be
# unused defaults — confirm before removing.
d_model = 512
n_head = 8

class TokenEmbedding(nn.Embedding):
    """Learned token embedding: maps token ids to d_model-dim vectors.

    Generalized: `padding_idx` is now a parameter (default 1 preserves the
    previous hard-coded behavior). The embedding row at `padding_idx` is
    initialized to zeros and receives no gradient updates.
    """

    def __init__(self, vocab_size, d_model, padding_idx=1):
        super(TokenEmbedding, self).__init__(vocab_size, d_model, padding_idx=padding_idx)

class PositionalEmbedding(nn.Module):
    """Fixed (non-learned) sinusoidal positional encoding.

    Builds a (max_len, d_model) table where even feature indices hold
    sin(pos / 10000^(2i/d_model)) and odd indices the matching cos.
    """

    def __init__(self, d_model, max_len=5000, device='cpu'):
        super(PositionalEmbedding, self).__init__()
        # Positions 0..max_len-1 as a column vector: (max_len, 1).
        pos = torch.arange(0, max_len, device=device).float().unsqueeze(dim=1)
        # Even feature indices 0, 2, 4, ...: (d_model/2,).
        _2i = torch.arange(0, d_model, step=2, device=device).float()

        # Since _2i is already even, the original 2 * (_2i // 2) equals _2i
        # exactly; use it directly.
        angles = pos / torch.pow(10000, _2i / d_model)

        encoding = torch.zeros(max_len, d_model, device=device)
        encoding[:, 0::2] = torch.sin(angles)
        encoding[:, 1::2] = torch.cos(angles)
        # FIX: register as a buffer so the table follows .to(device)/.cuda().
        # persistent=False keeps it out of the state dict (old checkpoints
        # stay loadable); buffers never receive gradients, so the explicit
        # requires_grad = False is no longer needed.
        self.register_buffer('encoding', encoding, persistent=False)

    def forward(self, x):
        # x: (batch, seq_len) token ids. Returns (seq_len, d_model); the
        # missing batch dimension broadcasts when added to token embeddings.
        batch_size, seq_len = x.size()
        return self.encoding[:seq_len, :]

class TransformerEmbedding(nn.Module):
    """Token embedding plus sinusoidal positional encoding, then dropout."""

    def __init__(self, vocab_size, d_model, max_len=5000, drop_prob=0.1, device='cpu'):
        super(TransformerEmbedding, self).__init__()
        self.token_embedding = TokenEmbedding(vocab_size, d_model)
        self.position_embedding = PositionalEmbedding(d_model, max_len, device)
        self.embedding_dropout = nn.Dropout(p=drop_prob)

    def forward(self, x):
        # x: (batch, seq_len) token ids. Positional table broadcasts over batch.
        token_vecs = self.token_embedding(x)
        position_vecs = self.position_embedding(x)
        return self.embedding_dropout(token_vecs + position_vecs)

class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention with a final output projection.

    Projects query/key/value into n_head subspaces of size d_model // n_head,
    attends in each head, then recombines and projects back to d_model.
    """

    def __init__(self, d_model, n_head):
        super(MultiHeadAttention, self).__init__()
        self.d_model = d_model
        self.n_head = n_head
        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_combine = nn.Linear(d_model, d_model)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, query, key, value, mask=None):
        b_q, t_q, d_q = query.shape
        # Promote 2-D key/value tensors to batched 3-D form.
        if key.dim() == 2:
            key = key.unsqueeze(0)
        if value.dim() == 2:
            value = value.unsqueeze(0)
        b_k, t_k, d_k = key.shape
        b_v, t_v, d_v = value.shape

        # All three inputs must carry d_model features.
        for label, dim in (("Query", d_q), ("Key", d_k), ("Value", d_v)):
            if dim != self.d_model:
                raise ValueError(f"{label} dimension {dim} doesn't match expected d_model {self.d_model}")

        head_dim = self.d_model // self.n_head
        # Project, split into heads, move the head axis before time:
        # (batch, time, d_model) -> (batch, n_head, time, head_dim).
        q = self.w_q(query).view(b_q, t_q, self.n_head, head_dim).permute(0, 2, 1, 3)
        k = self.w_k(key).view(b_k, t_k, self.n_head, head_dim).permute(0, 2, 1, 3)
        v = self.w_v(value).view(b_v, t_v, self.n_head, head_dim).permute(0, 2, 1, 3)

        # Scaled dot-product scores: (batch, n_head, t_q, t_k).
        scores = q @ k.transpose(2, 3) / math.sqrt(head_dim)
        if mask is not None:
            if mask.dim() == 3:
                # Insert the head axis so the mask broadcasts over heads.
                mask = mask.unsqueeze(1)
            scores = scores.masked_fill(mask == 0, -1e9)

        context = self.softmax(scores) @ v
        # Merge heads back: (batch, t_q, d_model).
        context = context.permute(0, 2, 1, 3).contiguous().view(b_q, t_q, self.d_model)
        return self.w_combine(context)

class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable scale/shift."""

    def __init__(self, d_model, eps=1e-5):
        super(LayerNorm, self).__init__()
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(d_model))
        self.beta = nn.Parameter(torch.zeros(d_model))

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        # Biased variance (unbiased=False) matches the textbook layer-norm formula.
        sigma2 = x.var(-1, unbiased=False, keepdim=True)
        normalized = (x - mu) / torch.sqrt(sigma2 + self.eps)
        return self.gamma * normalized + self.beta

class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward network: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, d_model, hidden, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.fc1 = nn.Linear(d_model, hidden)
        self.fc2 = nn.Linear(hidden, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Applied independently at every position; expands to `hidden`
        # features then projects back to d_model.
        return self.fc2(self.dropout(F.relu(self.fc1(x))))

class EncoderLayer(nn.Module):
    """One encoder block: self-attention and FFN sub-layers, each wrapped
    with dropout and a post-norm residual connection."""

    def __init__(self, d_model, ffn_hidden, n_head, dropout=0.1):
        super(EncoderLayer, self).__init__()
        self.attention = MultiHeadAttention(d_model, n_head)
        self.norm1 = LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.ffn = PositionwiseFeedForward(d_model, ffn_hidden, dropout)
        self.norm2 = LayerNorm(d_model)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # Self-attention sub-layer (post-norm residual).
        residual = x
        x = self.norm1(self.dropout1(self.attention(x, x, x, mask)) + residual)
        # Feed-forward sub-layer (post-norm residual).
        residual = x
        x = self.norm2(self.dropout2(self.ffn(x)) + residual)
        return x

class Encoder(nn.Module):
    """Transformer encoder: embedding followed by a stack of n_layer blocks."""

    def __init__(self, enc_voc_size, max_len, d_model, ffn_hidden, n_head, n_layer, dropout=0.1, device='cpu'):
        super(Encoder, self).__init__()
        self.embedding = TransformerEmbedding(enc_voc_size, d_model, max_len, dropout, device)
        self.layers = nn.ModuleList(
            [EncoderLayer(d_model, ffn_hidden, n_head, dropout) for _ in range(n_layer)]
        )

    def forward(self, x, s_mask):
        # x: (batch, src_len) token ids; s_mask is passed to every layer.
        x = self.embedding(x)
        for encoder_layer in self.layers:
            x = encoder_layer(x, s_mask)
        return x

class DecoderLayer(nn.Module):
    """One decoder block: masked self-attention, cross-attention over the
    encoder output, and a position-wise FFN — each followed by dropout and
    a post-norm residual connection."""

    def __init__(self, d_model, ffn_hidden, n_head, drop_prob):
        super(DecoderLayer, self).__init__()
        self.attention = MultiHeadAttention(d_model, n_head)
        self.norm1 = LayerNorm(d_model)
        self.dropout1 = nn.Dropout(drop_prob)
        self.cross_attention = MultiHeadAttention(d_model, n_head)
        self.norm2 = LayerNorm(d_model)
        self.dropout2 = nn.Dropout(drop_prob)
        self.ffn = PositionwiseFeedForward(d_model, ffn_hidden, drop_prob)
        self.norm3 = LayerNorm(d_model)
        self.dropout3 = nn.Dropout(drop_prob)

    def forward(self, dec, enc, tgt_mask, src_mask):
        # Masked self-attention sub-layer.
        _x = dec
        x = self.attention(dec, dec, dec, tgt_mask)
        x = self.dropout1(x)
        x = self.norm1(x + _x)
        # Cross-attention sub-layer: queries from the decoder, keys/values
        # from the encoder output.
        _x = x
        x = self.cross_attention(x, enc, enc, src_mask)
        x = self.dropout2(x)
        x = self.norm2(x + _x)
        # Feed-forward sub-layer.
        # BUG FIX: refresh the residual here. Previously _x still held the
        # pre-cross-attention activations, so norm3 added the wrong tensor
        # and the cross-attention output leaked out of its residual path.
        _x = x
        x = self.ffn(x)
        x = self.dropout3(x)
        x = self.norm3(x + _x)
        return x

class Decoder(nn.Module):
    """Transformer decoder: embedding, a stack of decoder blocks, and a
    final linear projection to target-vocabulary logits."""

    def __init__(self, dec_voc_size, max_len, d_model, ffn_hidden, n_head, n_layer, drop_prob, device):
        super(Decoder, self).__init__()
        self.embedding = TransformerEmbedding(dec_voc_size, d_model, max_len, drop_prob, device)
        self.layers = nn.ModuleList(
            [DecoderLayer(d_model, ffn_hidden, n_head, drop_prob) for _ in range(n_layer)]
        )
        self.fc = nn.Linear(d_model, dec_voc_size)

    def forward(self, dec, enc, tgt_mask, src_mask):
        # dec: (batch, trg_len) ids; enc: encoder output (batch, src_len, d_model).
        out = self.embedding(dec)
        for decoder_layer in self.layers:
            out = decoder_layer(out, enc, tgt_mask, src_mask)
        return self.fc(out)

class Transformer(nn.Module):
    """Encoder-decoder Transformer for sequence-to-sequence modelling.

    forward takes (batch, src_len) and (batch, trg_len) token-id tensors and
    returns (batch, trg_len, dec_voc_size) logits.
    """

    def __init__(self,
                 src_pad_ix,
                 trg_pad_ix,
                 enc_voc_size,
                 dec_voc_size,
                 d_model,
                 max_len,
                 n_heads,
                 ffn_hidden,
                 n_layers,
                 drop_prod,
                 device):
        super(Transformer, self).__init__()
        self.encoder = Encoder(enc_voc_size, max_len, d_model, ffn_hidden, n_heads, n_layers, drop_prod, device)
        self.decoder = Decoder(dec_voc_size, max_len, d_model, ffn_hidden, n_heads, n_layers, drop_prod, device)
        self.src_pad_ix = src_pad_ix
        self.trg_pad_ix = trg_pad_ix
        self.device = device

    def make_pad_mask(self, q, k, pad_idx_q, pad_idx_k):
        """Boolean mask of shape (batch, 1, len_q, len_k); True where BOTH
        the query and key tokens are non-padding."""
        q_mask = q.ne(pad_idx_q).unsqueeze(1).unsqueeze(3)  # (batch, 1, len_q, 1)
        k_mask = k.ne(pad_idx_k).unsqueeze(1).unsqueeze(2)  # (batch, 1, 1, len_k)
        # Broadcasting produces the same result as the former explicit
        # .repeat(...) calls without materializing the intermediates.
        return q_mask & k_mask

    def make_casual_mask(self, q, k):
        """Lower-triangular (len_q, len_k) causal mask — True at allowed
        positions. (Method name "casual" kept for API compatibility.)"""
        # dtype/device set at creation instead of the deprecated
        # .type(torch.BoolTensor) round-trip through CPU.
        return torch.tril(torch.ones(q.size(1), k.size(1), dtype=torch.bool, device=self.device))

    def make_src_mask(self, src):
        """(batch, 1, 1, src_len) non-padding mask for decoder cross-attention."""
        return src.ne(self.src_pad_ix).unsqueeze(1).unsqueeze(2)

    def forward(self, src, trg):
        src_mask = self.make_pad_mask(src, src, self.src_pad_ix, self.src_pad_ix)
        # Combine padding and causal constraints with logical AND (`&`)
        # instead of relying on bool-tensor multiplication.
        trg_mask = self.make_pad_mask(trg, trg, self.trg_pad_ix, self.trg_pad_ix) & self.make_casual_mask(trg, trg)
        src_cross_mask = self.make_src_mask(src)  # mask for cross-attention
        enc = self.encoder(src, src_mask)
        return self.decoder(trg, enc, trg_mask, src_cross_mask)




if __name__ == '__main__':
    # Smoke test: build a Transformer and push one random batch through it.
    config = dict(
        src_pad_ix=0,      # padding index for source sequences
        trg_pad_ix=0,      # padding index for target sequences
        enc_voc_size=10000,  # source vocabulary size
        dec_voc_size=8000,   # target vocabulary size
        d_model=512,         # model width
        max_len=512,         # maximum sequence length
        n_heads=8,           # attention heads
        ffn_hidden=2048,     # FFN hidden width
        n_layers=6,          # encoder/decoder depth
        drop_prod=0.1,       # dropout probability
        device='cpu',
    )
    batch_size, src_seq_len, trg_seq_len = 2, 10, 8

    transformer = Transformer(**config)

    # Random token ids in [1, vocab); force the first position to the pad
    # index so the padding masks have something to mask.
    src = torch.randint(1, config['enc_voc_size'], (batch_size, src_seq_len)).long()
    trg = torch.randint(1, config['dec_voc_size'], (batch_size, trg_seq_len)).long()
    src[:, 0] = config['src_pad_ix']
    trg[:, 0] = config['trg_pad_ix']

    output = transformer(src, trg)

    print(f"Source input shape: {src.shape}")            # (batch_size, src_seq_len)
    print(f"Target input shape: {trg.shape}")            # (batch_size, trg_seq_len)
    print(f"Transformer output shape: {output.shape}")   # (batch_size, trg_seq_len, dec_voc_size)
    print(f"Sample output[0, :2, :5]:\n{output[0, :2, :5]}")
