//
// Created by SongpingWang on 2025/2/5.
//

#include "captcha_recognize_new.h"

#include <algorithm>
#include <chrono>
#include <iostream>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

#include <torch/jit.h>
#include <torch/script.h>
#include <torch/serialize/input-archive.h>
#include <torch/optim/schedulers/lr_scheduler.h>
#include <torch/csrc/jit/frontend/tracer.h>





// Train the OCR model for TOTAL_EPOCH epochs, validating after each epoch and
// writing checkpoints under save_path.
//
// BATCH_SIZE  - mini-batch size for both the training and validation loaders.
// TOTAL_EPOCH - number of full passes over the training set.
// train_dir   - directory containing the training samples.
// valid_dir   - directory containing the validation samples.
// save_path   - path prefix where checkpoint files are written.
void captcha_recognize::Start(int BATCH_SIZE,
                              int TOTAL_EPOCH,
                              const std::string &train_dir,
                              const std::string &valid_dir,
                              const std::string &save_path) {
    // Build datasets and wrap them in data loaders.
    // NOTE(review): a SequentialSampler is used for TRAINING as well; shuffling
    // (RandomSampler) is normally preferred for SGD. Kept as-is to preserve
    // behavior — confirm whether this is intentional.
    auto train_dataset = OCRDataset(train_dir).map(CollateFn{});
    auto train_loader = torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(
            std::move(train_dataset),
            torch::data::DataLoaderOptions().batch_size(BATCH_SIZE).workers(0).drop_last(false)
    );

    auto val_dataset = OCRDataset(valid_dir).map(CollateFn{});
    auto val_loader = torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(
            std::move(val_dataset),
            torch::data::DataLoaderOptions().batch_size(BATCH_SIZE).workers(0).drop_last(false)
    );

    // CTC loss with blank index 0 (must match the decoding in calculate_acc),
    // Adam with a fixed learning rate.
    criterion = torch::nn::CTCLoss(torch::nn::CTCLossOptions().blank(0));
    auto optimizer = torch::optim::Adam(model->parameters(), torch::optim::AdamOptions(0.00125));

    std::vector<double> accuracies;  // per-epoch validation accuracy history
    std::vector<double> losses;      // per-epoch validation loss history

    for (int epoch = 0; epoch < TOTAL_EPOCH; ++epoch) {
        std::cout << "[Epoch " << epoch + 1 << "/" << TOTAL_EPOCH << "] ";
        auto start = std::chrono::high_resolution_clock::now();

        // ---------------- training phase ----------------
        model->train();
        int train_total = 0;
        int train_correct = 0;
        std::vector<double> train_losses;

        for (auto &batch: *train_loader) {
            auto images = batch.data;                       // [N, C, H, W] => [batch_size, 3, 100, 200]
            auto target = batch.target;                     // [N, max_len] => [batch_size, max_len]
            auto target_lengths = batch.target_lengths;     // [N]          => [batch_size]

            optimizer.zero_grad();
            auto outputs = model->forward(images);          // [T, N, C]    => [50, batch_size, 69]
            // Every sample emits the full T time steps, so all output lengths equal T.
            auto output_lengths = torch::full({outputs.size(1)}, outputs.size(0), torch::kInt32);  // [N]
            auto loss = criterion(outputs, target, output_lengths, target_lengths);
            loss.backward();
            optimizer.step();

            auto [train_c, train_t] = calculate_acc(cust_info, outputs, target, target_lengths);
            train_correct += train_c;
            train_total += train_t;
            train_losses.push_back(loss.item<double>());
        }
        // Guard against an empty training set: avoid division by zero / NaN stats.
        double train_acc = train_total > 0 ? 100.0 * train_correct / train_total : 0.0;
        double train_loss = train_losses.empty()
                ? 0.0
                : std::accumulate(train_losses.begin(), train_losses.end(), 0.0) / train_losses.size();

        // ---------------- validation phase ----------------
        model->eval();
        int eval_total = 0;
        int eval_correct = 0;
        std::vector<double> val_losses;

        {
            // Restrict the no-grad guard to the validation loop only, so the
            // JIT tracing done inside save_model() below runs with autograd
            // re-enabled (the original guard stayed alive for the rest of the
            // epoch body).
            torch::NoGradGuard no_grad;
            for (auto &batch: *val_loader) {
                auto images = batch.data;
                auto target = batch.target;
                auto target_lengths = batch.target_lengths;

                auto outputs = model->forward(images);
                auto output_lengths = torch::full({outputs.size(1)}, outputs.size(0), torch::kInt32);
                auto loss = criterion(outputs, target, output_lengths, target_lengths);

                auto [eval_c, eval_t] = calculate_acc(cust_info, outputs, target, target_lengths);
                eval_correct += eval_c;
                eval_total += eval_t;
                val_losses.push_back(loss.item<double>());
            }
        }

        // Guard against an empty validation set as well.
        double val_acc = eval_total > 0 ? 100.0 * eval_correct / eval_total : 0.0;
        double val_loss = val_losses.empty()
                ? 0.0
                : std::accumulate(val_losses.begin(), val_losses.end(), 0.0) / val_losses.size();

        // ---------------- checkpointing ----------------
        // Unconditional per-epoch checkpoint.
        save_model(save_path + "start_model_with_graph_11111.pt");
        // Best-so-far by validation accuracy (compared against the history
        // BEFORE this epoch is appended).
        if (accuracies.empty() || val_acc > *std::max_element(accuracies.begin(), accuracies.end())) {
            save_model(save_path + "start_model_with_graph_20250221.pt");
            torch::save(model, save_path + "start_20250221.pt");
        }
        // Threshold-triggered snapshots.
        if (train_loss < 0.0065) {
            save_model(save_path + "start_model_with_graph1_20250221.pt");
            torch::save(model, save_path + "best_1_20250221.pth");
        }
        if (train_acc > 98) {
            save_model(save_path + "start_model_with_graph2_20250221.pt");
            torch::save(model, save_path + "best_2_20250221.pth");
        }
        if (val_acc > 98) {
            save_model(save_path + "start_model_with_graph3_20250221.pt");
            torch::save(model, save_path + "best_3_20250221.pth");
        }

        accuracies.push_back(val_acc);
        losses.push_back(val_loss);

        auto end = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double> elapsed = end - start;
        std::cout << elapsed.count() << "s\tloss: " << train_loss << "\t- accuracy: " << train_acc
                  << "\t- val_loss: " << val_loss << "\t- val_accuracy: " << val_acc << std::endl;
    }
}


// Greedy CTC decode of the network output and exact-match comparison against
// the ground-truth labels.
//
// cust_info      - project context object (not used by this function).
// output         - raw model output, shape [T, N, C] (e.g. [50, batch, 69]).
// target         - padded label tensor, shape [N, max_len].
// target_lengths - true label length per sample, shape [N].
// Returns {number of fully-correct samples, batch size}.
std::pair<int, int> captcha_recognize::calculate_acc(const CUSTOM_INFO& cust_info,
    const torch::Tensor& output,                              // [T, N, C] => [50, batch_size, 69]
    const torch::Tensor& target,
    const torch::Tensor& target_lengths) {
    auto output_argmax = output.argmax(-1).permute({ 1, 0 }); // [N, T]    => [batch_size, 50]

    int correct_num = 0;
    for (int i = 0; i < output_argmax.size(0); ++i) {
        auto output_i = output_argmax[i];                     // [T] best class per time step

        // Standard CTC greedy decoding: FIRST collapse consecutive repeats,
        // THEN drop the blank symbol (index 0, matching CTCLossOptions().blank(0)).
        // BUG FIX: the original removed blanks before collapsing, which wrongly
        // merged genuine double characters separated by a blank
        // (e.g. "a <blank> a" decoded to "a" instead of "aa").
        auto collapsed = std::get<0>(torch::unique_consecutive(output_i));
        auto predict = collapsed.masked_select(collapsed != 0);

        const int label_length = target_lengths[i].item<int>();
        // A sample counts as correct only on an exact full-string match.
        if (predict.size(0) == label_length &&
            torch::all(predict == target[i].slice(0, 0, label_length)).item<bool>()) {
            correct_num++;
        }
    }
    return { correct_num, target.size(0) };
}




// Trace the current model with a dummy input and serialize it as a
// TorchScript module at `save_path`.
//
// Side effect: switches the model to eval() mode (callers in the training
// loop re-enter train() at the start of each epoch). Any failure is reported
// to stderr and swallowed so a bad checkpoint never aborts training.
void captcha_recognize::save_model(const std::string& save_path)
{
    try {
        model->eval();
        // Dummy input matching the expected image shape [1, 3, 100, 200].
        torch::Tensor dummy_input = torch::randn({ 1, 3, 100, 200 }).detach();
        // Forward function handed to the JIT tracer.
        auto traced_fn = [this](torch::jit::Stack inputs) -> torch::jit::Stack {
            auto input = inputs[0].toTensor();
            auto output = model->forward(input);
            return { output };
        };
        auto [tracing_state, output_stack] = torch::jit::tracer::trace(
            torch::jit::Stack{ dummy_input }, // input stack
            traced_fn,                        // forward function to trace
            [](const torch::autograd::Variable&) { return ""; }, // variable-name lookup (unused)
            true,                             // strict mode
            false,                            // force_outplace
            nullptr                           // self (only needed for Module subclasses)
        );
        std::shared_ptr<torch::jit::Graph> graph = tracing_state->graph;
        torch::jit::script::Module traced_model("Model");
        traced_model.set_optimized(true);

        // Register the model's parameters and buffers on the scripted module
        // under their real names.
        // BUG FIX: the original additionally re-registered every parameter a
        // second time as "param_<i>" clones, roughly doubling the checkpoint
        // size with dead tensors; that redundant pass has been removed.
        for (const auto& param_pair : model->named_parameters()) {
            traced_model.register_parameter(param_pair.key(), param_pair.value().clone(), true);
        }
        for (const auto& buffer_pair : model->named_buffers()) {
            traced_model.register_buffer(buffer_pair.key(), buffer_pair.value().clone());
        }

        // Attach the traced graph as the module's forward method and save.
        auto method = traced_model._ivalue()->compilation_unit()->create_function(
            "forward",
            graph
        );
        traced_model.type()->addMethod(method);
        traced_model.save(save_path);
    }
    catch (const std::exception& e) {
        std::cerr << "Error saving model: " << e.what() << std::endl;
    }
}
