import torch
from transformers import AutoTokenizer
from Transformer import Transformer

# Select device: CUDA if available, otherwise CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load a pretrained tokenizer; source and target share the same vocabulary
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
src_vocab_size = tokenizer.vocab_size
trg_vocab_size = tokenizer.vocab_size

# Initialize the model
# NOTE(review): `drop_prod` is presumably the dropout probability — confirm
# the parameter name against the Transformer class definition.
model = Transformer(
    src_pad_ix=0,
    trg_pad_ix=0,
    enc_voc_size=src_vocab_size,
    dec_voc_size=trg_vocab_size,
    d_model=512,
    max_len=512,
    n_heads=8,
    ffn_hidden=2048,
    n_layers=6,
    drop_prod=0.1,
    device=device
).to(device)

# Load the trained model weights and switch to inference mode
checkpoint = torch.load('transformer_best_model.pth', map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()

# Special token indices used during generation.
# NOTE(review): these differ from bert-base-uncased's own special ids
# (CLS=101, SEP=102, PAD=0) — confirm they match the ids used at training time.
BOS_TOKEN_IDX = 1
EOS_TOKEN_IDX = 2
PAD_TOKEN_IDX = 0

def prepare_input_sequence(text, max_length=20):
    """
    Encode an English text string into a padded source tensor.

    text: raw input English string
    max_length: fixed sequence length including BOS/EOS; tokens beyond
        max_length - 2 are truncated.

    Returns a (1, max_length) LongTensor on the global `device`.
    """
    # Encode without special tokens, leaving room for BOS and EOS.
    tokens = tokenizer.encode(text, add_special_tokens=False)[:max_length - 2]

    # Use the tokenizer's special ids when defined, otherwise fall back to
    # the training-time constants. Must test `is not None` rather than
    # `x or y`: a perfectly valid id of 0 (e.g. BERT's pad_token_id) is
    # falsy, so `or` would silently discard it.
    bos = tokenizer.cls_token_id if tokenizer.cls_token_id is not None else BOS_TOKEN_IDX
    eos = tokenizer.sep_token_id if tokenizer.sep_token_id is not None else EOS_TOKEN_IDX
    pad = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else PAD_TOKEN_IDX

    # Wrap with special markers, then right-pad to the fixed length.
    src_seq = [bos] + tokens + [eos]
    src_seq += [pad] * (max_length - len(src_seq))

    return torch.tensor([src_seq], dtype=torch.long).to(device)

def translate_sentence(model, src_tensor, max_length=50):
    """
    Greedy-decode a translation for an encoded source sentence.

    model: trained Transformer (assumed already in eval mode)
    src_tensor: (1, src_len) LongTensor of source token ids
    max_length: maximum number of decoding steps

    Returns the list of generated token indices, starting with BOS and
    ending with EOS when it was produced within max_length steps.
    """
    with torch.no_grad():  # pure inference — no gradient tracking needed
        # The source mask is loop-invariant: compute it once and reuse it
        # for the encoder pass AND every decoder cross-attention step
        # (previously it was rebuilt on each iteration).
        src_mask = model.make_src_mask(src_tensor)
        enc_output = model.encoder(src_tensor, src_mask)

        # Start the target sequence with the BOS marker.
        trg_indices = [BOS_TOKEN_IDX]

        for _ in range(max_length):
            trg_tensor = torch.LongTensor(trg_indices).unsqueeze(0).to(src_tensor.device)

            # Combine the padding mask with the causal (no look-ahead) mask.
            trg_mask = model.make_pad_mask(trg_tensor, trg_tensor, model.trg_pad_ix, model.trg_pad_ix) * \
                       model.make_casual_mask(trg_tensor, trg_tensor)

            # Decoder forward pass over the tokens generated so far.
            output = model.decoder(trg_tensor, enc_output, trg_mask, src_mask)

            # Greedy decoding: take the argmax at the last position.
            pred_token = output.argmax(2)[:, -1].item()
            trg_indices.append(pred_token)

            # Stop as soon as the model emits the end-of-sequence token.
            if pred_token == EOS_TOKEN_IDX:
                break

    return trg_indices

def translate_text(english_text):
    """
    Translate an English text string using the globally loaded model.

    english_text: input English text
    Returns the decoded translation string.
    """
    # Encode the source sentence.
    src_tensor = prepare_input_sequence(english_text)

    # Greedy decoding.
    translated_tokens = translate_sentence(model, src_tensor, max_length=50)

    # Strip the leading BOS; strip the trailing EOS only if one was
    # actually produced — unconditionally slicing `[1:-1]` would drop a
    # real token whenever decoding stopped at max_length without EOS.
    tokens = translated_tokens[1:]
    if tokens and tokens[-1] == EOS_TOKEN_IDX:
        tokens = tokens[:-1]

    return tokenizer.decode(tokens, skip_special_tokens=True)

# Usage example
if __name__ == "__main__":
    # Sentences used to smoke-test the translation pipeline.
    samples = [
        "Hello, how are you?",
        "I love machine learning.",
        "The weather is nice today."
    ]

    separator = "-" * 50
    for sentence in samples:
        french_translation = translate_text(sentence)
        print(f"English: {sentence}")
        print(f"French:  {french_translation}")
        print(separator)