import torch
import torch.nn as nn
import codecs
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

CHECKPOINT_PATH = "./data/seq2seq_ckpt.pth"

# Model hyper-parameters. These must match the values used at training time,
# otherwise the checkpoint weights will not load into the model.
HIDDEN_SIZE = 1024
NUM_LAYERS = 2
SRC_VOCAB_SIZE = 10000
TRG_VOCAB_SIZE = 4000
SHARE_EMB_AND_SOFTMAX = True

# Special token ids — presumably <sos>/<eos> line positions in the vocab files; verify.
SOS_ID = 1
EOS_ID = 2

# Vocabulary files: one token per line; line number == token id (see load_vocab).
SRC_VOCAB = "./data/en.vocab"
TRG_VOCAB = "./data/zh.vocab"


class NMTModel(nn.Module):
    """Encoder-decoder LSTM translation model used here for greedy inference.

    Layer names and shapes must match the training-time model so that the
    checkpoint loads; hyper-parameters come from the module-level constants.
    """

    def __init__(self):
        super(NMTModel, self).__init__()
        self.encoder = nn.LSTM(HIDDEN_SIZE, HIDDEN_SIZE, NUM_LAYERS, batch_first=True)
        self.decoder = nn.LSTM(HIDDEN_SIZE, HIDDEN_SIZE, NUM_LAYERS, batch_first=True)

        self.src_embedding = nn.Embedding(SRC_VOCAB_SIZE, HIDDEN_SIZE)
        self.trg_embedding = nn.Embedding(TRG_VOCAB_SIZE, HIDDEN_SIZE)

        # The Linear layer is created unconditionally (it was duplicated in both
        # branches before); when sharing is enabled, its weight is tied to the
        # target embedding matrix — shapes match: (TRG_VOCAB_SIZE, HIDDEN_SIZE).
        self.fc_out = nn.Linear(HIDDEN_SIZE, TRG_VOCAB_SIZE)
        if SHARE_EMB_AND_SOFTMAX:
            self.fc_out.weight = self.trg_embedding.weight

    def forward(self, src_input, max_dec_len=100):
        """Greedy-decode a single source sentence.

        Args:
            src_input: list of source token ids (one sentence, no batch dim).
            max_dec_len: hard cap on output length, including the leading <sos>.

        Returns:
            List of target token ids starting with SOS_ID; ends with EOS_ID
            unless max_dec_len was reached first.
        """
        # Inference only: no autograd graph needed (the original tracked
        # gradients across the whole decode loop for nothing).
        with torch.no_grad():
            src_tensor = torch.tensor([src_input], dtype=torch.long)
            _, enc_state = self.encoder(self.src_embedding(src_tensor))

            trg_ids = [SOS_ID]
            dec_state = enc_state  # decoder starts from the final encoder state

            for _ in range(max_dec_len - 1):
                trg_input = torch.tensor([[trg_ids[-1]]], dtype=torch.long)
                dec_output, dec_state = self.decoder(
                    self.trg_embedding(trg_input), dec_state)
                logits = self.fc_out(dec_output.squeeze(0))
                next_id = torch.argmax(logits).item()
                trg_ids.append(next_id)
                if next_id == EOS_ID:
                    break

        return trg_ids


def load_vocab(vocab_file):
    """Read a vocabulary file (one token per line) into a token -> id dict.

    The id of each token is its zero-based line number in the file.
    """
    with codecs.open(vocab_file, 'r', 'utf-8') as f:
        tokens = [line.strip() for line in f]
    return {token: idx for idx, token in enumerate(tokens)}


def encode_sentence(sentence, vocab_dict):
    """Convert a whitespace-tokenised sentence into a list of token ids.

    An '<eos>' token is appended before encoding; any token missing from the
    vocabulary maps to the id of '<unk>'.
    """
    ids = []
    for token in (sentence + " <eos>").split():
        if token in vocab_dict:
            ids.append(vocab_dict[token])
        else:
            ids.append(vocab_dict['<unk>'])
    return ids


def decode_output(output_ids, vocab_list):
    """Map decoder output ids back to text.

    Drops the first id (<sos>) and the last id (normally <eos>) and joins the
    remaining tokens without separators (suitable for character-level Chinese).
    """
    inner_ids = output_ids[1:-1]
    return ''.join(vocab_list[token_id] for token_id in inner_ids)


def calculate_bleu(reference, candidate):
    """Compute smoothed BLEU-1/2/3 for one candidate against one reference.

    Both arguments are token sequences; smoothing method4 avoids zero scores
    on short sentences with missing n-gram matches.
    """
    smooth = SmoothingFunction().method4
    weight_sets = {
        'BLEU-1': (1, 0, 0, 0),
        'BLEU-2': (0.5, 0.5, 0, 0),
        'BLEU-3': (0.33, 0.33, 0.33, 0),
    }
    return {
        name: sentence_bleu([reference], candidate,
                            weights=weights, smoothing_function=smooth)
        for name, weights in weight_sets.items()
    }


def calculate_perplexity(model, input_ids, target_ids):
    """Compute the model's perplexity on one (source, target) sentence pair.

    The target is teacher-forced through the decoder as a single sequence, so
    each position is conditioned on the whole gold prefix. (The previous code
    put every target token in its own batch row with a copied encoder state,
    which conditioned each prediction on only the immediately preceding token.)

    Args:
        model: model exposing src_embedding/encoder/trg_embedding/decoder/fc_out.
        input_ids: source token ids for a single sentence.
        target_ids: target token ids, including leading <sos> and trailing <eos>.

    Returns:
        exp(mean negative log-likelihood) over the len(target_ids) - 1
        predicted tokens.

    Raises:
        ValueError: if target_ids contains fewer than two tokens (nothing to
            predict).
    """
    if len(target_ids) < 2:
        raise ValueError("target_ids must contain at least two tokens")

    model.eval()
    with torch.no_grad():  # evaluation only; no gradients needed
        src_input = torch.tensor([input_ids], dtype=torch.long)
        _, enc_state = model.encoder(model.src_embedding(src_input))

        # Teacher forcing: feed target[:-1] as ONE batch-first sequence of
        # length T-1 so the LSTM state flows across positions; the encoder's
        # final state initialises the decoder directly (batch size stays 1).
        trg_inputs = torch.tensor([target_ids[:-1]], dtype=torch.long)
        dec_outputs, _ = model.decoder(model.trg_embedding(trg_inputs), enc_state)

        logits = model.fc_out(dec_outputs.squeeze(0))  # (T-1, vocab)
        loss_fn = nn.CrossEntropyLoss(reduction='sum')
        loss = loss_fn(logits, torch.tensor(target_ids[1:], dtype=torch.long))

        # Normalise by the number of predicted tokens (T-1), not T: the old
        # divisor overcounted by one and systematically deflated perplexity.
        ppl = torch.exp(loss / (len(target_ids) - 1)).item()
    return ppl


def main():
    """Load the trained checkpoint, translate a sample sentence, and report
    BLEU and perplexity against a reference translation."""
    model = NMTModel()

    # weights_only=True refuses arbitrary pickled objects in the checkpoint.
    checkpoint = torch.load(CHECKPOINT_PATH, map_location='cpu', weights_only=True)
    model.load_state_dict(checkpoint['model_state_dict'], strict=False)
    model.eval()

    test_sentence = "This is a test ."
    reference_translation = "这是一个测试 。"  # assumed reference translation

    src_id_dict = load_vocab(SRC_VOCAB)
    test_en_ids = encode_sentence(test_sentence, src_id_dict)

    trg_id_dict = load_vocab(TRG_VOCAB)
    # load_vocab assigns ids in line order, so the key list doubles as an
    # id -> token lookup table.
    trg_vocab_list = list(trg_id_dict.keys())
    reference_ids = encode_sentence(reference_translation, trg_id_dict)

    # Call the model, not model.forward(), so nn.Module hooks run; no_grad
    # avoids building an autograd graph during generation.
    with torch.no_grad():
        output_ids = model(test_en_ids)
    output_text = decode_output(output_ids, trg_vocab_list)

    print("Input:", test_sentence)
    print("Reference Translation:", reference_translation)
    print("Generated Translation:", output_text)

    # Strip the leading <sos> and the trailing token (normally <eos>).
    # NOTE(review): reference_ids still ends with <eos> while the candidate
    # does not — confirm this asymmetry is intended for the BLEU comparison.
    candidate_ids = output_ids[1:-1]

    bleu_scores = calculate_bleu(reference_ids, candidate_ids)
    print(f"Bleu Scores: {bleu_scores}")

    perplexity = calculate_perplexity(model, test_en_ids, reference_ids)
    print(f"Perplexity: {perplexity}")


# Entry point when executed as a script; importing the module has no side effects.
if __name__ == "__main__":
    main()



