import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import numpy as np

# --- Data preparation ---
# Chinese source vocabulary; '<pad>' (index 5) is the padding token.
src_vocab = {
    '你好': 0, '谢谢': 1, '再见': 2, '早上好': 3, '晚上好': 4, '<pad>': 5
}

# English target vocabulary; each entry is a whole phrase paired 1:1 with a source word.
tgt_vocab = {
    'hello': 0, 'thank you': 1, 'goodbye': 2, 'good morning': 3, 'good evening': 4, '<pad>': 5
}

src_vocab_size = len(src_vocab)  # 6, including '<pad>'
tgt_vocab_size = len(tgt_vocab)  # 6, including '<pad>'

# Toy parallel corpus: five sentence pairs, each a single-token sequence.
src_seqs = [
    ['你好'], ['谢谢'], ['再见'], ['早上好'], ['晚上好']
]
tgt_seqs = [
    ['hello'], ['thank you'], ['goodbye'], ['good morning'], ['good evening']
]


def text_to_indices(texts, vocab):
    """Map every token sequence in *texts* to its index sequence via *vocab*.

    Raises KeyError if a token is missing from *vocab* (no fallback token).
    """
    return [[vocab[token] for token in sequence] for sequence in texts]


def pad_sequences(sequences, max_len, pad_idx):
    """Right-pad (or truncate) each sequence to exactly *max_len* entries.

    Sequences shorter than *max_len* are extended with *pad_idx*; longer
    ones are cut down to the first *max_len* items.
    """
    # seq[:max_len] handles truncation; the pad multiplier is non-positive
    # (hence empty) whenever the sequence is already long enough.
    return [seq[:max_len] + [pad_idx] * (max_len - len(seq)) for seq in sequences]


# Convert both corpora from tokens to vocabulary indices.
src_indices = text_to_indices(src_seqs, src_vocab)
tgt_indices = text_to_indices(tgt_seqs, tgt_vocab)

# Sanity-check: report any index that falls outside its vocabulary range.
for bad in (i for seq in src_indices for i in seq if i < 0 or i >= src_vocab_size):
    print(f"Invalid index in src: {bad}")
for bad in (i for seq in tgt_indices for i in seq if i < 0 or i >= tgt_vocab_size):
    print(f"Invalid index in tgt: {bad}")

# Pad every sequence up to the longest one on each side.
max_src_len = max(len(seq) for seq in src_indices)
max_tgt_len = max(len(seq) for seq in tgt_indices)

src_padded = pad_sequences(src_indices, max_src_len, src_vocab['<pad>'])
tgt_padded = pad_sequences(tgt_indices, max_tgt_len, tgt_vocab['<pad>'])

# Long tensors of shape (num_samples, max_len), ready for nn.Embedding.
src_tensor = torch.tensor(src_padded, dtype=torch.long)
tgt_tensor = torch.tensor(tgt_padded, dtype=torch.long)


# Seq2seq Transformer for token-level translation.
class TransformerModel(nn.Module):
    """Embeds source/target token ids, runs them through nn.Transformer,
    and projects decoder states to target-vocabulary logits.

    Input/output shapes are batch-first: (batch, seq_len) in,
    (batch, tgt_len, tgt_vocab_size) out.
    """

    def __init__(self, src_vocab_size, tgt_vocab_size, d_model, nhead, num_layers, dim_feedforward):
        super(TransformerModel, self).__init__()
        self.src_embedding = nn.Embedding(src_vocab_size, d_model)
        self.tgt_embedding = nn.Embedding(tgt_vocab_size, d_model)
        # NOTE(review): no positional encoding is added to the embeddings, so
        # the model is order-blind beyond the causal mask — confirm intended.
        self.transformer = nn.Transformer(d_model=d_model, nhead=nhead, num_encoder_layers=num_layers,
                                          num_decoder_layers=num_layers, dim_feedforward=dim_feedforward)
        self.fc = nn.Linear(d_model, tgt_vocab_size)

    def forward(self, src, tgt):
        # nn.Transformer here expects (seq_len, batch, d_model), hence the permutes.
        src_embedded = self.src_embedding(src).permute(1, 0, 2)
        tgt_embedded = self.tgt_embedding(tgt).permute(1, 0, 2)
        # Bug fix: without a causal mask the decoder attends to FUTURE target
        # tokens during teacher forcing and just learns to copy them.
        tgt_mask = self.transformer.generate_square_subsequent_mask(tgt.size(1)).to(tgt.device)
        output = self.transformer(src_embedded, tgt_embedded, tgt_mask=tgt_mask)
        # Back to batch-first before the vocabulary projection.
        return self.fc(output.permute(1, 0, 2))


# --- Model hyperparameters ---
d_model = 128
nhead = 4
num_layers = 2
dim_feedforward = 512

model = TransformerModel(src_vocab_size, tgt_vocab_size, d_model, nhead, num_layers, dim_feedforward)

# Loss ignores padding positions; Adam with step decay on the learning rate.
criterion = nn.CrossEntropyLoss(ignore_index=tgt_vocab['<pad>'])
optimizer = optim.Adam(model.parameters(), lr=0.001)
# NOTE(review): stepping every epoch with gamma=0.1 shrinks lr by 10x every
# 10 epochs (~1e-13 by epoch 100), effectively freezing training — confirm intended.
scheduler = StepLR(optimizer, step_size=10, gamma=0.1)

# Train with teacher forcing: decoder input is tgt[:, :-1], labels are tgt[:, 1:].
num_epochs = 100
for epoch in range(num_epochs):
    optimizer.zero_grad()
    # NOTE(review): every target here is a single token, so tgt_tensor[:, :-1]
    # is a length-0 decoder input and tgt_tensor[:, 1:] is empty — verify this
    # actually trains; a <bos>/<eos> scheme is the usual fix.
    output = model(src_tensor, tgt_tensor[:, :-1])
    loss = criterion(output.reshape(-1, tgt_vocab_size), tgt_tensor[:, 1:].reshape(-1))
    loss.backward()
    optimizer.step()
    scheduler.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')


# Simple greedy-decoding inference example.
def translate(model, src_text, src_vocab, tgt_vocab, max_len=None):
    """Greedily decode a translation of *src_text*, one token at a time.

    Args:
        model: callable taking (src_tensor, tgt_tensor) -> (1, tgt_len, |V|) logits.
        src_text: list of source-language tokens.
        src_vocab / tgt_vocab: token -> index mappings; both must contain '<pad>'.
        max_len: decoding length cap; defaults to the module-level max_tgt_len.

    Returns:
        The decoded target tokens joined by spaces, or "" when a source token
        is out of vocabulary or the first prediction is '<pad>'.
    """
    if max_len is None:
        max_len = max_tgt_len  # module-level global, as before
    try:
        src_indices = [src_vocab[word] for word in src_text]
    except KeyError as e:
        print(f"输入词汇 {e} 不在源词汇表中。")
        return ""
    src_tensor = torch.tensor(src_indices, dtype=torch.long).unsqueeze(0)
    pad_idx = tgt_vocab['<pad>']
    # NOTE(review): '<pad>' doubles as the start-of-sequence token and the stop
    # condition; a dedicated <bos>/<eos> pair would be cleaner — confirm intended.
    tgt_tensor = torch.tensor([[pad_idx]], dtype=torch.long)
    # Bug fix: inference previously ran with autograd tracking enabled.
    # NOTE(review): caller should also put the model in eval() mode (dropout).
    with torch.no_grad():
        for _ in range(max_len):
            output = model(src_tensor, tgt_tensor)
            next_idx = output.argmax(dim=-1)[:, -1]  # greedy pick at the last position
            if next_idx.item() == pad_idx:
                break
            tgt_tensor = torch.cat([tgt_tensor, next_idx.unsqueeze(0)], dim=1)
    # Invert the vocab once instead of scanning it per generated token
    # (also avoids shadowing the builtin `id` as the old loop did).
    idx_to_word = {index: word for word, index in tgt_vocab.items()}
    decoded = tgt_tensor[0].tolist()[1:]  # drop the start token
    return ' '.join(idx_to_word[index] for index in decoded if index in idx_to_word)


# Demo: translate one source phrase with the trained model.
demo_src = ['你好']
demo_translation = translate(model, demo_src, src_vocab, tgt_vocab)
print(f'{demo_src}翻译结果: {demo_translation}')
