﻿#include "chEnModel.h"
#include <sstream>
#include <algorithm>

chEnModel::chEnModel(const std::string& model_path, const Vocab& src_vocab, const Vocab& tar_vocab)
    : m_src_vocab(src_vocab), m_tar_vocab(tar_vocab) {
    // Pick the compute device: first CUDA GPU when one is present, CPU otherwise.
    if (torch::cuda::is_available()) {
        m_device = torch::Device(torch::kCUDA, 0);
    }
    else {
        m_device = torch::Device(torch::kCPU);
    }

    // Deserialize the TorchScript module, move it onto the chosen device,
    // and switch it to inference mode. Loading failures are surfaced as a
    // std::runtime_error so callers don't need to know about c10::Error.
    try {
        m_model = torch::jit::load(model_path);
        m_model.to(m_device);
        m_model.eval();
    }
    catch (const c10::Error& e) {
        throw std::runtime_error("Model loading failed: " + std::string(e.what()));
    }
}

// Turn a raw source sentence into a (1, num_steps) int64 index tensor on the
// model's device: whitespace-tokenize, lowercase, append <eos>, map through the
// source vocabulary, then pad with <pad> (index 0 if absent) or truncate to
// exactly num_steps tokens.
// NOTE(review): truncation can drop the appended <eos> when the sentence has
// >= num_steps tokens — confirm the exported model tolerates that.
torch::Tensor chEnModel::preprocess_input(const std::string& src_sentence, int num_steps) {
    // Tokenize on whitespace and lowercase each token.
    // Cast through unsigned char: feeding a negative char to tolower is UB.
    std::vector<std::string> tokens;
    std::istringstream iss(src_sentence);
    std::string token;
    while (iss >> token) {
        std::transform(token.begin(), token.end(), token.begin(),
                       [](unsigned char ch) { return static_cast<char>(::tolower(ch)); });
        tokens.push_back(token);
    }
    tokens.push_back("<eos>"); // end-of-sequence marker

    // Map tokens to vocabulary indices.
    std::vector<int> indices;
    indices.reserve(tokens.size());
    for (const auto& t : tokens) {
        indices.push_back(m_src_vocab[t]);
    }

    // A single resize both truncates (when too long) and pads (when too short);
    // the fill value is only used when growing. The explicit cast avoids the
    // signed/unsigned comparison the old two-branch version relied on.
    const int pad_idx = m_src_vocab.token2idx.count("<pad>") ? m_src_vocab["<pad>"] : 0;
    const auto target_len =
        static_cast<std::vector<int>::size_type>(num_steps < 0 ? 0 : num_steps);
    indices.resize(target_len, pad_idx);

    // Shape (1, num_steps), dtype int64, on the model's device.
    return torch::tensor(indices, torch::kLong).unsqueeze(0).to(m_device);
}

std::string chEnModel::predict(const std::string& src_sentence, int num_steps) {
    torch::NoGradGuard no_grad;

    // 1. 输入预处理
    torch::Tensor enc_input = preprocess_input(src_sentence, num_steps);

    // 2. 编码器前向传播
    auto encoder = m_model.attr("encoder").toModule();
    auto enc_output = encoder.forward({ enc_input }).toTuple();
    torch::Tensor enc_state = enc_output->elements()[1].toTensor(); // 获取编码器状态 (num_layers, batch, hidden)

    // 3. 解码器初始化
    auto decoder = m_model.attr("decoder").toModule();
    torch::Tensor dec_input = torch::tensor({ {m_tar_vocab["<bos>"]} }, torch::kLong).to(m_device); // 初始输入为<bos>
    std::vector<int> output_seq;

    const int max_gen_steps = 100;
    for (int step = 0; step < max_gen_steps; ++step) {
        // 准备解码器输入（输入序列和状态）
        std::vector<c10::IValue> decoder_inputs = { dec_input, enc_state };
        auto decoder_output = decoder.forward(decoder_inputs).toTuple();

        // 获取输出和新状态
        torch::Tensor logits = decoder_output->elements()[0].toTensor(); // (batch, seq_len, vocab_size)
        enc_state = decoder_output->elements()[1].toTensor(); // 更新后的状态

        // 选择概率最高的token        这里的采样逻辑存在很大缺陷，但是主包懒得搞了
        dec_input = logits.argmax(2).detach(); // (batch, seq_len) -> (1,1)
        int current_token = dec_input.item<int>();

        if (current_token == m_tar_vocab["<eos>"] || output_seq.size() >= num_steps) {
            break;
        }
        output_seq.push_back(current_token);
    }

    // 转换为文本
    std::string translation;
    for (int idx : output_seq) {
        translation += m_tar_vocab.to_tokens(idx) + " ";
    }
    if (!translation.empty()) {
        translation.pop_back(); // 移除末尾空格
    }
    return translation;
}