import copy
import math
import torch
import logging

import numpy as np
import sentencepiece as spm
import torch.optim as optim

from tqdm import tqdm
from dataclasses import dataclass
from typing import List, Union, Optional, Dict

from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader

from transformers.utils import PaddingStrategy

# Positional Encoding
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (fixed, non-learned).

    Adds a precomputed sin/cos position signal to the input embeddings and
    applies dropout. The table covers ``max_len`` positions.
    """

    def __init__(self, d_model=512, dropout=0.1, max_len=5000):
        """
        :param d_model: embedding dimension (assumed even by the sin/cos split)
        :param dropout: dropout probability applied after adding positions
        :param max_len: maximum sequence length supported by the table
        """
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Position-encoding table of shape (max_len, d_model)
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # [max_len, 1]
        # Frequency factor 10000^(-2i/d_model) for each even index 2i
        div_term = 10000 ** (-torch.arange(0, d_model, 2).float() / d_model)

        pe[:, 0::2] = torch.sin(position * div_term)  # even columns
        pe[:, 1::2] = torch.cos(position * div_term)  # odd columns
        pe = pe.unsqueeze(0)  # [1, max_len, d_model] so it broadcasts over the batch

        # Registered as a buffer: saved with the model, not a trainable parameter.
        self.register_buffer("pe", pe)

    def forward(self, x):
        """Add positional encodings to ``x`` ([batch, seq_len, d_model]) and apply dropout.

        Bug fix: the original class defined no ``forward``, so calling the
        module (as Translate_Transformer does) raised NotImplementedError.
        """
        x = x + self.pe[:, : x.size(1)]
        return self.dropout(x)

# Multi-Head Attention
class MultiHeadAttention(nn.Module):
    def __init__(self, h: int, d_model: int, dropout: float = 0.1):
        """
                多头注意力机制
                :param h: 多头数
                :param d_model: 输入向量维度
                :param dropout: dropout比例
                """
        super(MultiHeadAttention, self).__init__()
        assert d_model % h == 0, "d_model必须能被h整除"

        self.d_k = d_model // h
        self.h = h

        self.linears = nn.ModuleList(
            [copy.deepcopy(nn.Linear(d_model, d_model)) for _ in range(4)]
        )

        self.attn = None  # 保存注意力权重
        self.dropout = nn.Dropout(p=dropout)

    def attention(self, query, key, value, mask=None, dropout=None):
        d_k = query.size(-1)

        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)

        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)

        attn = F.softmax(scores, dim=-1)

        if dropout is not None:
            attn = dropout(attn)

        return torch.matmul(attn, value), attn

    def forward(self, query, key, value, mask=None):
        """
        多头注意力前向传播
        输入: [batch, seq_len, d_model]
        输出: [batch, seq_len, d_model]
        """
        if mask is not None:
            mask = mask.unsqueeze(1)  # 扩展到多头维度

        nbatches = query.size(0)

        # 1) 映射并拆分多头
        query, key, value = [
            l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
            for l, x in zip(self.linears, (query, key, value))
        ]

        x, self.attn = self.attention(query,key,value,mask=mask, dropout=self.dropout)
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](x)

# Layer Normalization
class LayerNormalization(nn.Module):
    """Layer normalization over the last dimension with a learnable affine map."""

    def __init__(self, features, eps=1e-6):
        """
        :param features: size of the normalized (last) dimension
        :param eps: small constant added to the variance for numerical stability
        """
        super(LayerNormalization, self).__init__()
        self.gamma = nn.Parameter(torch.ones(features))   # learnable scale
        self.beta = nn.Parameter(torch.zeros(features))   # learnable shift
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: [batch, seq_len, d_model]; statistics are taken per position.
        mu = x.mean(dim=-1, keepdim=True)
        sigma2 = x.var(dim=-1, keepdim=True, unbiased=False)  # population variance
        normalized = (x - mu) / torch.sqrt(sigma2 + self.eps)
        return self.gamma * normalized + self.beta

# Feed Forward Network (FFN)
class FFN(nn.Module):
    """Position-wise feed-forward network: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(FFN, self).__init__()
        self.l1 = nn.Linear(d_model, d_ff)   # expand to the inner dimension
        self.l2 = nn.Linear(d_ff, d_model)   # project back to the model dimension
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.l1(x))
        return self.l2(self.dropout(hidden))

# Residual attention sub-block (used by both the encoder and the decoder)
class BlockOne(nn.Module):
    """Multi-head attention followed by dropout, a residual add, and LayerNorm."""

    def __init__(self, head_num, d_model, dropout):
        super(BlockOne, self).__init__()
        self.mha = MultiHeadAttention(head_num, d_model, dropout)
        self.ln = LayerNormalization(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, query, key, value, mask=None):
        # Post-LN ordering: attend, drop, add the residual, then normalize.
        attended = self.mha(query, key, value, mask)
        residual = query + self.dropout(attended)
        return self.ln(residual)

class BlockTwo(nn.Module):
    """Position-wise FFN followed by dropout, a residual add, and LayerNorm."""

    def __init__(self, d_model, d_ff, dropout):
        super(BlockTwo, self).__init__()
        self.ffn = FFN(d_model=d_model, d_ff=d_ff, dropout=dropout)
        self.ln = LayerNormalization(features=d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Post-LN ordering: transform, drop, add the residual, then normalize.
        transformed = self.ffn(x)
        return self.ln(x + self.dropout(transformed))

class EncoderLayer(nn.Module):
    """One encoder layer: self-attention sub-block followed by an FFN sub-block."""

    def __init__(self, head_num, d_model, d_ff, dropout):
        super(EncoderLayer, self).__init__()
        # Bug fix: expose d_model — EncoderStack reads ``layer.d_model`` to
        # size its final LayerNorm, and the attribute was previously missing
        # (AttributeError at model construction).
        self.d_model = d_model
        self.bk1 = BlockOne(head_num=head_num, d_model=d_model, dropout=dropout)
        self.bk2 = BlockTwo(d_model=d_model, d_ff=d_ff, dropout=dropout)

    def forward(self, x, mask=None):
        # Self-attention (query = key = value = x) then the position-wise FFN.
        x = self.bk1(x, x, x, mask)
        x = self.bk2(x)
        return x

# Decoder
class DecoderLayer(nn.Module):
    """One decoder layer: masked self-attention, encoder-decoder attention, FFN."""

    def __init__(self, head_num, d_model, d_ff, dropout):
        super(DecoderLayer, self).__init__()
        # Bug fix: expose d_model — DecoderStack reads ``layer.d_model`` to
        # size its final LayerNorm, and the attribute was previously missing
        # (AttributeError at model construction).
        self.d_model = d_model
        # 1) masked multi-head self-attention
        self.bk1 = BlockOne(head_num=head_num, d_model=d_model, dropout=dropout)
        # 2) encoder-decoder (cross) attention
        self.bk2 = BlockOne(head_num=head_num, d_model=d_model, dropout=dropout)
        # 3) position-wise FFN
        self.bk3 = BlockTwo(d_model=d_model, d_ff=d_ff, dropout=dropout)

    def forward(self, query, memory, src_mask=None, tgt_mask=None):
        """
        :param query: target-side representations [batch, tgt_len, d_model]
        :param memory: encoder output [batch, src_len, d_model]
        :param src_mask: padding mask over the encoder output
        :param tgt_mask: causal + padding mask over the target sequence
        """
        # 1) masked self-attention over the target sequence
        out = self.bk1(query=query, key=query, value=query, mask=tgt_mask)
        # 2) attend to the encoder memory
        out = self.bk2(query=out, key=memory, value=memory, mask=src_mask)
        # 3) position-wise FFN
        return self.bk3(out)

class EncoderStack(nn.Module):
    """
    Encoder stack: ``layer_num`` deep copies of a prototype encoder layer
    applied in sequence, followed by a final LayerNorm. Residual connections
    already live inside each layer, so the stack adds none of its own.
    """

    def __init__(self, layer, layer_num):
        """
        :param layer: prototype encoder layer (deep-copied layer_num times)
        :param layer_num: number of stacked layers
        """
        super(EncoderStack, self).__init__()
        self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(layer_num)])

        # Bug fix: the original unconditionally read ``layer.d_model``, but the
        # file's EncoderLayer never set that attribute, so construction raised
        # AttributeError. Prefer the attribute when present; otherwise infer
        # the width from the first LayerNormalization inside the prototype.
        d_model = getattr(layer, "d_model", None)
        if d_model is None:
            for module in layer.modules():
                if isinstance(module, LayerNormalization):
                    d_model = module.gamma.numel()
                    break
        if d_model is None:
            raise ValueError("cannot determine d_model from the given layer")
        self.norm = LayerNormalization(d_model)

    def forward(self, x, mask):
        """Run x ([batch, seq_len, d_model]) through every layer, then normalize."""
        for layer in self.layers:
            x = layer(x, mask)
        return self.norm(x)

class DecoderStack(nn.Module):
    """
    Decoder stack: ``layer_num`` deep copies of a prototype decoder layer
    applied in sequence, followed by a final LayerNorm.
    """

    def __init__(self, layer, layer_num):
        """
        :param layer: prototype decoder layer (deep-copied layer_num times)
        :param layer_num: number of stacked layers
        """
        super(DecoderStack, self).__init__()
        self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(layer_num)])

        # Bug fix: the original unconditionally read ``layer.d_model``, but the
        # file's DecoderLayer never set that attribute, so construction raised
        # AttributeError. Prefer the attribute when present; otherwise infer
        # the width from the first LayerNormalization inside the prototype.
        d_model = getattr(layer, "d_model", None)
        if d_model is None:
            for module in layer.modules():
                if isinstance(module, LayerNormalization):
                    d_model = module.gamma.numel()
                    break
        if d_model is None:
            raise ValueError("cannot determine d_model from the given layer")
        self.norm = LayerNormalization(d_model)

    def forward(self, x, memory, src_mask, tgt_mask):
        """
        :param x: target-sequence embeddings [batch, tgt_len, d_model]
        :param memory: encoder output [batch, src_len, d_model]
        :param src_mask: encoder padding mask
        :param tgt_mask: decoder causal + padding mask
        """
        for layer in self.layers:
            x = layer(x, memory, src_mask, tgt_mask)
        return self.norm(x)


class Generator(nn.Module):
    """Decoder output head: Linear projection followed by log-softmax."""

    def __init__(self, d_model: int, vocab: int):
        """
        :param d_model: model dimension of the incoming decoder states
        :param vocab: target vocabulary size
        """
        super(Generator, self).__init__()
        self.linear = nn.Linear(d_model, vocab)

    def forward(self, x):
        # log_softmax rather than softmax:
        # 1. numerically more stable (tiny probabilities don't underflow)
        # 2. pairs directly with NLLLoss
        logits = self.linear(x)
        return torch.log_softmax(logits, dim=-1)



class Translate_Transformer(nn.Module):
    """
    End-to-end translation Transformer:
    Embedding + PositionalEncoding -> EncoderStack -> DecoderStack -> Generator.
    """

    def __init__(self, src_vocab_size: int, tgt_vocab_size: int,
                 head_num: int = 8, layer_num: int = 6,
                 d_model: int = 512, d_ff: int = 2048, dropout: float = 0.1):
        super(Translate_Transformer, self).__init__()

        # Prototype layers; each stack deep-copies its prototype layer_num times.
        enc_proto = EncoderLayer(head_num, d_model, d_ff, dropout)
        dec_proto = DecoderLayer(head_num, d_model, d_ff, dropout)

        self.encoder_stack = EncoderStack(enc_proto, layer_num)
        self.decoder_stack = DecoderStack(dec_proto, layer_num)

        # Separate positional-encoding modules for source and target streams.
        self.pe_encode = PositionalEncoding(d_model, dropout)
        self.pe_decode = PositionalEncoding(d_model, dropout)

        self.src_embedd = nn.Embedding(src_vocab_size, d_model)
        self.tgt_embedd = nn.Embedding(tgt_vocab_size, d_model)

        self.generator = Generator(d_model, tgt_vocab_size)

    def forward(self, src, tgt, src_mask, tgt_mask):
        """Return (decoder hidden states, vocabulary log-probabilities)."""
        src_repr = self.pe_encode(self.src_embedd(src))
        tgt_repr = self.pe_decode(self.tgt_embedd(tgt))

        memory = self.encoder_stack(src_repr, src_mask)
        hidden = self.decoder_stack(tgt_repr, memory, src_mask, tgt_mask)

        return hidden, self.generator(hidden)



