//
// Created by SongpingWang on 2025/2/5.
//

#include "captcha_recognize.h"

#include <torch/jit.h>
#include <torch/serialize/input-archive.h>
#include <torch/serialize/output-archive.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>


/// Runs the full CTC training loop for the captcha recognizer.
///
/// @param BATCH_SIZE  mini-batch size for both train and validation loaders
/// @param CLASS_NUM   number of output classes INCLUDING the CTC blank
///                    (blank index is CLASS_NUM - 1)
/// @param TOTAL_EPOCH number of epochs to train
/// @param train_dir   directory of training samples (consumed by CodeDataset)
/// @param valid_dir   directory of validation samples
/// @param save_path   directory/prefix where checkpoints are written
void captcha_recognize::Start(int BATCH_SIZE,
                              int CLASS_NUM,
                              int TOTAL_EPOCH,
                              const std::string& train_dir,
                              const std::string& valid_dir,
                              const std::string& save_path)
{
    // Build datasets and wrap them in data loaders (sequential order,
    // single worker, keep the final partial batch).
    auto train_dataset = CodeDataset(train_dir).map(CollateFn{});
    auto train_loader = torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(
            std::move(train_dataset),
            torch::data::DataLoaderOptions().batch_size(BATCH_SIZE).workers(0).drop_last(false)
    );

    auto val_dataset = CodeDataset(valid_dir).map(CollateFn{});
    auto val_loader = torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(
            std::move(val_dataset),
            torch::data::DataLoaderOptions().batch_size(BATCH_SIZE).workers(0).drop_last(false)
    );

    CUSTOM_ASSERT(cust_info.alphaNum_class == CLASS_NUM - 1, "alphaNum_class = CLASS_NUM-1(CLASS_NUM contains blank)");
    // The last class index serves as the CTC blank symbol.
    criterion = torch::nn::CTCLoss(torch::nn::CTCLossOptions().blank(CLASS_NUM - 1));
    auto optimizer = torch::optim::Adam(model->parameters(), torch::optim::AdamOptions(0.001));

    std::vector<double> accuracies;
    std::vector<double> losses;

    // Averages a vector of per-batch losses; returns 0 for an empty epoch
    // (e.g. an empty data directory) instead of dividing by zero.
    const auto mean_of = [](const std::vector<double>& xs) {
        return xs.empty() ? 0.0 : std::accumulate(xs.begin(), xs.end(), 0.0) / xs.size();
    };

    for (int epoch = 0; epoch < TOTAL_EPOCH; ++epoch) {
        std::cout << "Epoch " << epoch + 1 << "/" << TOTAL_EPOCH << std::endl;
        auto start = std::chrono::high_resolution_clock::now();

        // ----- training phase -----
        model->train();
        int train_total = 0;
        int train_correct = 0;
        std::vector<double> train_run_loss;

        for (auto& batch : *train_loader) {
            auto images = batch.data;
            auto target = batch.target;
            auto target_lengths = batch.target_lengths;

            optimizer.zero_grad();
            auto outputs = model->forward(images);
            // CTC input lengths: every sequence spans the full time axis
            // (outputs is [T, N, C], so size(0) = T, size(1) = N).
            auto output_lengths = torch::full({ outputs.size(1) }, outputs.size(0), torch::kInt32);
            auto loss = criterion(outputs, target, output_lengths, target_lengths);
            loss.backward();
            optimizer.step();

            auto [c, t] = calculate_acc(cust_info, outputs, target, target_lengths);
            train_correct += c;
            train_total += t;
            train_run_loss.push_back(loss.item<double>());
        }

        double train_acc = train_total > 0 ? 100.0 * train_correct / train_total : 0.0;
        double train_loss = mean_of(train_run_loss);

        // ----- validation phase -----
        model->eval();
        int eval_total = 0;
        int eval_correct = 0;
        // BUGFIX: validation losses were previously pushed into the training
        // loss vector, so val_loss averaged train + val together.
        std::vector<double> val_run_loss;

        {
            // Disable autograd only for the evaluation loop.
            torch::NoGradGuard no_grad;
            for (auto& batch : *val_loader) {
                auto images = batch.data;
                auto target = batch.target;
                auto target_lengths = batch.target_lengths;

                auto outputs = model->forward(images);
                auto output_lengths = torch::full({ outputs.size(1) }, outputs.size(0), torch::kInt32);
                auto loss = criterion(outputs, target, output_lengths, target_lengths);

                auto [c, t] = calculate_acc(cust_info, outputs, target, target_lengths);
                eval_correct += c;
                eval_total += t;
                val_run_loss.push_back(loss.item<double>());
            }
        }

        double val_acc = eval_total > 0 ? 100.0 * eval_correct / eval_total : 0.0;
        double val_loss = mean_of(val_run_loss);

        // Checkpointing: keep the best-validation model plus threshold-based
        // snapshots (file names kept as-is for downstream compatibility).
        if (accuracies.empty() || val_acc > *std::max_element(accuracies.begin(), accuracies.end())) {
            torch::save(model, save_path + "start.pt");
            export_weight(save_path + "model_state.pt");
        }
        if (train_loss < 0.0065) {
            torch::save(model, save_path + "best_1.pth");
            export_weight(save_path + "model_state1.pt");
        }
        if (train_acc > 98) {
            torch::save(model, save_path + "best_2.pth");
            export_weight(save_path + "model_state2.pt");
        }
        if (val_acc > 98) {
            torch::save(model, save_path + "best_3.pth");
            export_weight(save_path + "model_state3.pt");
        }

        accuracies.push_back(val_acc);
        losses.push_back(val_loss);

        auto end = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double> elapsed = end - start;
        std::cout << elapsed.count() << "s loss: " << train_loss << " - accuracy: " << train_acc
                  << " - val_loss: " << val_loss << " - val_accuracy: " << val_acc << std::endl;
    }
}


/// Greedy CTC decoding + exact-match accuracy for one batch.
///
/// @param cust_info       project config; alphaNum_class is the CTC blank index
/// @param output          model output, argmax'd over the last (class) dim and
///                        permuted to [N, T] — assumes [T, N, C] input layout
/// @param target          padded label tensor, one row per sample
/// @param target_lengths  true label length of each sample
/// @return {number of exactly-matched samples, batch size}
std::pair<int, int> captcha_recognize::calculate_acc(const CustInfo& cust_info,
                                                     const torch::Tensor& output,
                                                     const torch::Tensor& target,
                                                     const torch::Tensor& target_lengths) {
    auto output_argmax = output.argmax(-1).permute({ 1, 0 }); // [N, T]

    int correct_num = 0;
    for (int64_t i = 0; i < output_argmax.size(0); ++i) {
        // CTC greedy decode: collapse consecutive repeats...
        auto predict = std::get<0>(torch::unique_consecutive(output_argmax[i]));

        // ...then drop blank symbols. masked_select replaces the previous
        // element-by-element loop, which paid a host sync (.item<>()) twice
        // per element; this is a single tensor op with identical output.
        auto predict_tensor = predict.masked_select(predict != cust_info.alphaNum_class);

        // A sample counts as correct only on an exact, full-length match.
        const int label_length = target_lengths[i].item<int>();
        if (predict_tensor.size(0) == label_length &&
            torch::all(predict_tensor == target[i].slice(0, 0, label_length)).item<bool>()) {
            correct_num++;
        }
    }
    return { correct_num, static_cast<int>(target.size(0)) };
}



/// Serializes the model's parameters (state dict) to `weight_path` via a
/// torch::serialize::OutputArchive.
///
/// @param weight_path destination file for the serialized parameters
void captcha_recognize::export_weight(const std::string& weight_path) {
    /*
    // Earlier TorchScript export experiment, kept for reference.
    std::string script_torchscript = weight_path + "script.TorchScript";
    std::string trace_torchscript = weight_path + "trace.TorchScript";

    // Convert the model to TorchScript via scripting and save it.
    try {
        torch::jit::Module scripted_model = torch::jit::script(model);
        scripted_model.save(script_torchscript);
        std::cout << "Model exported to TorchScript successfully." << std::endl;
    }
    catch (const c10::Error& e) {
        std::cerr << "script export failed: " << e.what() << std::endl;
        return;
    }

    // Convert the model to TorchScript via tracing: build an example input
    // tensor, trace, and save.
    std::vector<torch::jit::IValue> input_samples;
    torch::Tensor example_input = torch::randn({ 1, 3, cust_info.max_height, cust_info.max_width });
    input_samples.emplace_back(example_input);
    try {
        torch::jit::Module traced_model = torch::jit::trace(model, input_samples);
        traced_model.save(trace_torchscript);
    }
    catch (const c10::Error& e) {
        std::cerr << "trace export failed: " << e.what() << std::endl;
        return;
    }
     */

    // Save the model's state dict.
    torch::serialize::OutputArchive archive;
    model->save(archive);
    archive.save_to(weight_path);
    // BUGFIX: label and path were previously printed with no separator
    // ("weight_pathC:/...").
    std::cout << "weight_path: " << weight_path << std::endl;
}


