import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
import numpy as np
import spacy
import random
from tqdm import tqdm
import math

# Seed every RNG the script touches so results are reproducible across runs.
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
# Seed ALL visible GPUs (the original seeded only the current one);
# this is a safe no-op on CPU-only machines.
torch.cuda.manual_seed_all(SEED)

# 1. Data loading and preprocessing
def load_data():
    """Load and preprocess the Multi30k dataset for German -> English translation.

    NOTE(review): the original docstring said English-French, but the file
    extensions ('.de', '.en') and the tokenizers below are German/English.

    Returns:
        (SRC, TRG, train_iterator, valid_iterator, test_iterator, device):
        the two torchtext Fields (with built vocabularies), bucketed batch
        iterators for each split, and the torch device in use.
    """
    # Load spaCy tokenizer models (must be installed beforehand via
    # `python -m spacy download de_core_news_sm` / `en_core_web_sm`).
    spacy_de = spacy.load('de_core_news_sm')
    spacy_en = spacy.load('en_core_web_sm')
    
    # Tokenization helpers: raw text -> list of token strings.
    def tokenize_de(text):
        return [tok.text for tok in spacy_de.tokenizer(text)]
    
    def tokenize_en(text):
        return [tok.text for tok in spacy_en.tokenizer(text)]
    
    # Fields define text -> tensor conversion; batch_first=True makes every
    # batch tensor shaped (batch, seq_len), matching the model below.
    SRC = Field(tokenize=tokenize_de, 
                init_token='<sos>', 
                eos_token='<eos>', 
                lower=True, 
                batch_first=True)
    
    TRG = Field(tokenize=tokenize_en, 
                init_token='<sos>', 
                eos_token='<eos>', 
                lower=True, 
                batch_first=True)
    
    # Download/load the dataset splits; '.de' files are the source side.
    train_data, valid_data, test_data = Multi30k.splits(exts=('.de', '.en'), 
                                                          fields=(SRC, TRG))
    
    # Build vocabularies from the training split only; tokens occurring
    # fewer than 2 times map to <unk>.
    SRC.build_vocab(train_data, min_freq=2)
    TRG.build_vocab(train_data, min_freq=2)
    
    # Bucket examples of similar source length together to minimise padding.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    BATCH_SIZE = 128
    
    train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
        (train_data, valid_data, test_data), 
        batch_size=BATCH_SIZE,
        sort_within_batch=True,
        sort_key=lambda x: len(x.src),
        device=device)
    
    return SRC, TRG, train_iterator, valid_iterator, test_iterator, device

# 2. Transformer model definition
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    Adds fixed position-dependent sine/cosine signals to token embeddings so
    the otherwise order-invariant attention layers can use sequence order.

    Args:
        d_model: embedding dimension of the model.
        max_len: longest sequence length that will ever be encoded.
    """
    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency progression: wavelengths from 2*pi to 10000*2*pi.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        
        pe[:, 0::2] = torch.sin(position * div_term)
        # Slice the cosine half to d_model // 2 columns so odd d_model also
        # works (the original crashed: cos(...) has one column too many then).
        # For even d_model the slice is a no-op, so behavior is unchanged.
        pe[:, 1::2] = torch.cos(position * div_term)[:, :d_model // 2]
        
        # Buffer, not a Parameter: saved with the model but never trained.
        # Shape (1, max_len, d_model) broadcasts over the batch dimension.
        self.register_buffer('pe', pe.unsqueeze(0))
    
    def forward(self, x):
        """Add positional encodings to x of shape (batch, seq_len, d_model)."""
        return x + self.pe[:, :x.size(1)]

class TransformerModel(nn.Module):
    """Sequence-to-sequence Transformer for translation.

    Embeds source/target token ids, adds sinusoidal positional encodings,
    runs a standard encoder/decoder stack, and projects decoder states to
    target-vocabulary logits. All tensors are batch-first:
    (batch, seq_len, ...).
    """
    def __init__(self, input_dim, output_dim, d_model, nhead, num_encoder_layers, 
                 num_decoder_layers, dim_feedforward, max_seq_len, dropout=0.1):
        super(TransformerModel, self).__init__()
        
        # Token-id -> vector lookup tables for each language.
        # (Submodule creation order matches the original so that seeded
        # random initialisation is identical.)
        self.src_embedding = nn.Embedding(input_dim, d_model)
        self.trg_embedding = nn.Embedding(output_dim, d_model)
        
        # Shared sinusoidal positional encoding.
        self.pos_encoder = PositionalEncoding(d_model, max_seq_len)
        
        # Encoder and decoder stacks.
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, batch_first=True),
            num_encoder_layers)
        self.decoder = nn.TransformerDecoder(
            nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, batch_first=True),
            num_decoder_layers)
        
        # Projection from model dimension to target-vocabulary logits.
        self.fc_out = nn.Linear(d_model, output_dim)
        
        self.d_model = d_model
        self.dropout = nn.Dropout(dropout)
        
    def forward(self, src, trg, src_mask=None, trg_mask=None, memory_mask=None, 
                src_padding_mask=None, trg_padding_mask=None, memory_padding_mask=None):
        """Full encoder-decoder pass; returns (batch, trg_len, output_dim) logits."""
        scale = math.sqrt(self.d_model)
        
        # Scale embeddings (as in the original paper), add positions, dropout.
        src_embedded = self.dropout(self.pos_encoder(self.src_embedding(src) * scale))
        trg_embedded = self.dropout(self.pos_encoder(self.trg_embedding(trg) * scale))
        
        memory = self.encoder(src_embedded, mask=src_mask,
                              src_key_padding_mask=src_padding_mask)
        decoded = self.decoder(trg_embedded, memory,
                               tgt_mask=trg_mask, memory_mask=memory_mask,
                               tgt_key_padding_mask=trg_padding_mask,
                               memory_key_padding_mask=memory_padding_mask)
        
        return self.fc_out(decoded)
    
    def generate_square_subsequent_mask(self, sz):
        """Additive causal mask: 0.0 on/below the diagonal, -inf above it."""
        return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)

# 3. Training
def train(model, iterator, optimizer, criterion, clip, device):
    """Train the model for one epoch and return the mean batch loss.

    Uses teacher forcing: the decoder receives trg[:, :-1] and is trained to
    predict trg[:, 1:] (every position predicts the next token).

    Args:
        model: TransformerModel (or compatible) being trained.
        iterator: yields batches with .src and .trg (batch, seq_len) tensors.
        optimizer: optimizer over model.parameters().
        criterion: loss over (N, vocab) logits vs. (N,) target indices.
        clip: max gradient norm for clipping.
        device: device the causal mask should be moved to.
    """
    model.train()
    epoch_loss = 0
    
    for batch in tqdm(iterator):
        src = batch.src
        trg = batch.trg
        
        optimizer.zero_grad()
        
        # The decoder input drops the final token, so the causal mask must be
        # sized to that length (trg_len - 1). The original sized it on the
        # full target length, which nn.TransformerDecoder rejects as a shape
        # mismatch against trg[:, :-1].
        trg_input = trg[:, :-1]
        trg_mask = model.generate_square_subsequent_mask(trg_input.size(1)).to(device)
        
        output = model(src, trg_input, trg_mask=trg_mask)
        
        # Flatten to (batch * (trg_len-1), vocab) vs. gold next-tokens.
        output_dim = output.shape[-1]
        output = output.contiguous().view(-1, output_dim)
        gold = trg[:, 1:].contiguous().view(-1)
        
        loss = criterion(output, gold)
        loss.backward()
        
        # Clip gradient norm to stabilise Transformer training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        
        epoch_loss += loss.item()
    
    return epoch_loss / len(iterator)

def evaluate(model, iterator, criterion, device):
    """Compute the mean batch loss over an iterator without updating weights.

    Mirrors train(): teacher forcing with decoder input trg[:, :-1] scored
    against targets trg[:, 1:].

    Args:
        model: TransformerModel (or compatible) to evaluate.
        iterator: yields batches with .src and .trg (batch, seq_len) tensors.
        criterion: loss over (N, vocab) logits vs. (N,) target indices.
        device: device the causal mask should be moved to.
    """
    model.eval()
    epoch_loss = 0
    
    with torch.no_grad():
        for batch in iterator:
            src = batch.src
            trg = batch.trg
            
            # Size the causal mask to the decoder INPUT length (trg_len - 1).
            # The original used the full target length, which mismatches
            # trg[:, :-1] fed to the decoder and raises at runtime.
            trg_input = trg[:, :-1]
            trg_mask = model.generate_square_subsequent_mask(trg_input.size(1)).to(device)
            
            output = model(src, trg_input, trg_mask=trg_mask)
            
            # Flatten to (batch * (trg_len-1), vocab) vs. gold next-tokens.
            output_dim = output.shape[-1]
            output = output.contiguous().view(-1, output_dim)
            gold = trg[:, 1:].contiguous().view(-1)
            
            loss = criterion(output, gold)
            epoch_loss += loss.item()
    
    return epoch_loss / len(iterator)

def run_training(model, train_iterator, valid_iterator, optimizer, criterion, clip, device, N_EPOCHS=10):
    """Run the full training loop, checkpointing the best model.

    After each epoch the validation loss is compared against the best seen so
    far; on improvement the weights are saved to 'transformer-model.pt'.
    Returns the model object (holding the last epoch's weights).
    """
    best_valid_loss = float('inf')
    
    for epoch in range(N_EPOCHS):
        print(f'Epoch: {epoch+1}/{N_EPOCHS}')
        
        # One pass over training data, then measure validation loss.
        train_loss = train(model, train_iterator, optimizer, criterion, clip, device)
        valid_loss = evaluate(model, valid_iterator, criterion, device)
        
        print(f'\tTrain Loss: {train_loss:.4f}')
        print(f'\t Val. Loss: {valid_loss:.4f}')
        
        # Checkpoint whenever validation improves.
        if valid_loss >= best_valid_loss:
            continue
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'transformer-model.pt')
    
    return model

# 4. Inference
def translate_sentence(sentence, src_field, trg_field, model, device, max_len=50):
    """Greedily translate one German sentence into a list of English tokens.

    Args:
        sentence: raw string (tokenized here with spaCy) or a pre-tokenized
            list of token strings.
        src_field / trg_field: torchtext Fields with built vocabularies.
        model: a trained TransformerModel.
        device: torch device the model lives on.
        max_len: maximum number of tokens to generate.

    Returns:
        List of predicted target-language tokens, without <sos>/<eos>.
    """
    model.eval()
    
    # Tokenize if needed and lowercase to match the Field preprocessing.
    if isinstance(sentence, str):
        spacy_de = spacy.load('de_core_news_sm')
        tokens = [token.text.lower() for token in spacy_de(sentence)]
    else:
        tokens = [token.lower() for token in sentence]
    
    # Wrap with <sos>/<eos> and map tokens to vocabulary indices.
    tokens = [src_field.init_token] + tokens + [src_field.eos_token]
    src_indexes = [src_field.vocab.stoi[token] for token in tokens]
    
    src_tensor = torch.LongTensor(src_indexes).unsqueeze(0).to(device)
    
    # Encode the source once. No attention mask is needed here: encoder
    # self-attention may see every source position. (The original passed a
    # (1, src_len) zero tensor, which is not a valid (S, S) attention mask.)
    with torch.no_grad():
        memory = model.encoder(model.pos_encoder(model.src_embedding(src_tensor) * math.sqrt(model.d_model)))
    
    # Start the target sequence with <sos>.
    trg_indexes = [trg_field.vocab.stoi[trg_field.init_token]]
    
    # Greedy autoregressive decoding.
    for _ in range(max_len):
        trg_tensor = torch.LongTensor(trg_indexes).unsqueeze(0).to(device)
        
        # Causal mask over what has been generated so far.
        trg_mask = model.generate_square_subsequent_mask(trg_tensor.size(1)).to(device)
        
        with torch.no_grad():
            output = model.decoder(model.pos_encoder(model.trg_embedding(trg_tensor) * math.sqrt(model.d_model)), 
                                   memory, tgt_mask=trg_mask)
            # Project hidden states to vocabulary logits. (The original
            # argmax-ed the raw d_model-dim decoder states, which are not
            # token scores, producing meaningless predictions.)
            logits = model.fc_out(output)
        
        # Take the highest-scoring token at the last position.
        pred_token = logits.argmax(2)[:, -1].item()
        trg_indexes.append(pred_token)
        
        # Stop as soon as the model emits <eos>.
        if pred_token == trg_field.vocab.stoi[trg_field.eos_token]:
            break
    
    trg_tokens = [trg_field.vocab.itos[i] for i in trg_indexes]
    
    # Strip the leading <sos>; strip the trailing token only when it really is
    # <eos> (the original always dropped the last token, truncating outputs
    # that hit max_len without emitting <eos>).
    if trg_tokens[-1] == trg_field.eos_token:
        return trg_tokens[1:-1]
    return trg_tokens[1:]

# Entry point
def main():
    """End-to-end pipeline: load data, build and train the model, then
    translate one example sentence."""
    SRC, TRG, train_iterator, valid_iterator, test_iterator, device = load_data()
    
    # Model hyperparameters (a small configuration for quick experimentation).
    INPUT_DIM = len(SRC.vocab)
    OUTPUT_DIM = len(TRG.vocab)
    D_MODEL = 256
    NHEAD = 8
    NUM_ENCODER_LAYERS = 3
    NUM_DECODER_LAYERS = 3
    DIM_FEEDFORWARD = 512
    MAX_SEQ_LEN = 100
    DROPOUT = 0.1
    
    model = TransformerModel(INPUT_DIM, OUTPUT_DIM, D_MODEL, NHEAD, 
                            NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, 
                            DIM_FEEDFORWARD, MAX_SEQ_LEN, DROPOUT).to(device)
    
    # Xavier-initialise every weight matrix; biases and 1-D weights keep
    # their default initialisation.
    def init_weights(m):
        if hasattr(m, 'weight') and m.weight.dim() > 1:
            nn.init.xavier_uniform_(m.weight.data)
    
    model.apply(init_weights)
    
    optimizer = optim.Adam(model.parameters(), lr=0.0001)
    # Padding positions must not contribute to the loss.
    criterion = nn.CrossEntropyLoss(ignore_index=TRG.vocab.stoi[TRG.pad_token])
    
    CLIP = 1.0
    N_EPOCHS = 10
    model = run_training(model, train_iterator, valid_iterator, optimizer, criterion, CLIP, device, N_EPOCHS)
    
    # Reload the best checkpoint. map_location keeps this working even when
    # the checkpoint was saved from a different device (the original omitted
    # it, so a CUDA-saved checkpoint fails to load on a CPU-only host).
    model.load_state_dict(torch.load('transformer-model.pt', map_location=device))
    
    # Sample translation.
    example_sentence = "Ein Mann mit einem orangefarbenen Hut, der etwas anstarrt."
    translation = translate_sentence(example_sentence, SRC, TRG, model, device)
    print(f"翻译结果: {' '.join(translation)}")

# BUG FIX: the original `main()` call was not indented under the guard,
# which is an IndentationError — the script could not run at all.
if __name__ == "__main__":
    main()
