#include <torch/torch.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
#include <vector>
#include <filesystem>
#include <ctime>  // 添加头文件，用于记录时间
#include <net.h>

//#include "matplotlibcpp.h" // Matplotlib C++ 接口
#define NEED_TRAIN 
//namespace plt = matplotlibcpp;

// Hyperparameters
const int batch_size = 64;
const double learning_rate = 0.01;
// BUG FIX: momentum was declared `const int`, so the initializer 0.5 was
// truncated to 0 and SGD momentum was silently disabled. Declared as double
// so the configured value actually reaches SGDOptions(...).momentum(...).
const double momentum = 0.5;
const int EPOCH = 10;
const std::string weight_file = "../weights/mnist_cnn_weights.pt"; // checkpoint path

// Global device selection: defaults to CPU; may be switched to CUDA in main().
torch::Device device(torch::kCPU);


// Convert an OpenCV Mat to a normalized torch Tensor of shape {1, 1, 28, 28}.
//
// Steps: convert to grayscale (when needed), resize to 28x28, scale pixel
// values to [0, 1], then normalize with the MNIST mean/stddev (0.1307/0.3081)
// so inference inputs match the training-time Normalize transform.
//
// The tensor is cloned because from_blob() only borrows the Mat's buffer,
// which dies when `resized` goes out of scope. The caller is responsible for
// moving the result to the desired device.
torch::Tensor matToTensor(const cv::Mat& image) {
    cv::Mat gray;
    // Handle 3-channel (BGR) and 4-channel (BGRA) inputs; pass single-channel
    // images through unchanged instead of letting cvtColor assert on them.
    switch (image.channels()) {
    case 3:  cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);  break;
    case 4:  cv::cvtColor(image, gray, cv::COLOR_BGRA2GRAY); break;
    default: gray = image; break;
    }
    cv::Mat resized;
    cv::resize(gray, resized, cv::Size(28, 28)); // the model expects 28x28 input
    resized.convertTo(resized, CV_32F, 1.0 / 255); // scale to [0, 1]

    // Own the pixel data (clone), then apply the same normalization used at
    // training time: (x - mean) / stddev.
    auto tensor = torch::from_blob(resized.data, { 1, 1, 28, 28 }, torch::kFloat32).clone();
    tensor = tensor.sub(0.1307).div(0.3081);
    return tensor;
}

// Run single-image inference: load an image from disk, preprocess it into a
// MNIST-style tensor, and print the predicted digit with its confidence.
//
// model:      trained network (already moved to `device` by the caller).
// image_path: path to the image file; a load failure is reported and skipped.
//
// The shared_ptr is taken by const reference (no ownership transfer is
// needed), which is backward compatible for all existing callers.
void inference(const std::shared_ptr<Net>& model, const std::string& image_path) {
    // Load the image with OpenCV.
    cv::Mat image = cv::imread(image_path);
    if (image.empty()) {
        std::cerr << "Error: Could not open or find the image!" << std::endl;
        return;
    }

    // Preprocess into a {1, 1, 28, 28} normalized tensor.
    auto input_tensor = matToTensor(image);

    // Model inference.
    model->eval();
    torch::NoGradGuard no_grad;             // inference only: skip autograd bookkeeping
    input_tensor = input_tensor.to(device); // keep the data on the same device as the model
    auto output = model->forward(input_tensor);

    std::cout << "output tensor: " << output << std::endl;
    auto prediction = output.argmax(1);
    auto probabilities = torch::softmax(output, 1);
    // Confidence (softmax probability) of the predicted class.
    auto confidence = probabilities[0][prediction.item<int>()].item<float>();
    std::cout << "Predicted Label: " << prediction.item<int>() << " Confidence-> " << confidence << std::endl;
}
// Program entry point: optionally (re)trains the MNIST CNN, saves the weights
// together with metadata (cumulative epoch count, finish timestamp), then
// reloads the checkpoint and runs inference on the sample images 0.jpg..9.jpg.
int main() {
#if USECUDA_BOOL_CONST
    // NOTE(review): USECUDA_BOOL_CONST is not defined anywhere in this file,
    // so this branch compiles out and the program always runs on CPU unless
    // the macro is supplied by the build system — confirm that is intended.
    if (!torch::cuda::is_available()) {
        std::cerr << "CUDA is not available. Switching to CPU..." << std::endl;
        device = torch::Device(torch::kCPU); // fall back to CPU when no GPU is present
    }
    else {
        std::cerr << "CUDA is available...." << std::endl;
        device = torch::Device(torch::kCUDA);
    }
    std::cout << "USECUDA_BOOL_CONST is true" << std::endl;
#else
    std::cout << "USECUDA_BOOL_CONST is false" << std::endl;
#endif
    try {
#if defined(NEED_TRAIN)
        // Load MNIST with the same Normalize/Stack transforms for train and test.
        auto train_dataset = torch::data::datasets::MNIST("../data/mnist")
            .map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
            .map(torch::data::transforms::Stack<>());
        auto test_dataset = torch::data::datasets::MNIST("../data/mnist", torch::data::datasets::MNIST::Mode::kTest)
            .map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
            .map(torch::data::transforms::Stack<>());

        auto train_loader = torch::data::make_data_loader<torch::data::samplers::RandomSampler>(
            std::move(train_dataset), batch_size);
        auto test_loader = torch::data::make_data_loader<torch::data::samplers::SequentialSampler>(
            std::move(test_dataset), batch_size);

        // Instantiate the model.
        auto model = std::make_shared<Net>();

        // Resume from a previous checkpoint when one exists. epoch_count
        // accumulates the total number of training epochs across runs.
        int64_t epoch_count = 0;
        if (std::filesystem::exists(weight_file)) {
            std::cout << "权重文件存在，基于之前的权重继续训练。" << weight_file << std::endl;
            // Create an input archive and load the checkpoint from disk.
            torch::serialize::InputArchive archive;
            archive.load_from(weight_file);

            // Load the model weights.
            model->load(archive);
            std::cout << "Model weights loaded successfully!" << std::endl;

            // Read the accumulated epoch count stored alongside the weights.
            try {
                torch::Tensor epoch_tensor;
                archive.read("epoch_count", epoch_tensor);
                // BUG FIX: this used to declare a NEW local `epoch_count`,
                // shadowing the outer counter, so the cumulative total saved
                // below was always EPOCH + 0. Assign to the outer variable.
                epoch_count = epoch_tensor.item<int64_t>();
                std::cout << "[Weight info]:Training Epoch Count: " << epoch_count << std::endl;
            }
            catch (const c10::Error& e) {
                std::cerr << "Warning: 'epoch_count' not found in saved model!" << std::endl;
            }

            // Read the "training finished" timestamp, which is stored as an
            // ASCII-coded int64 tensor (archives hold tensors, not strings).
            try {
                torch::Tensor time_tensor;
                archive.read("finished_time", time_tensor);
                auto time_data = time_tensor.data_ptr<int64_t>();
                std::string finished_time;
                for (size_t i = 0; i < time_tensor.size(0); ++i) {
                    finished_time += static_cast<char>(time_data[i]);
                }
                std::cout << "Training Finished Time: " << finished_time << std::endl;
            }
            catch (const c10::Error& e) {
                std::cerr << "[Weight info]:Warning: 'finished_time' not found in saved model!" << std::endl;
            }
        }
        else {
            std::cout << "权重文件不存在,训练结束后会生成新的 " << weight_file << std::endl;
        }

        model->to(device); // move the model to the selected device
        torch::optim::SGD optimizer(model->parameters(), torch::optim::SGDOptions(learning_rate).momentum(momentum));

        // Training loop.
        for (int epoch = 0; epoch < EPOCH; ++epoch) {
            model->train();
            size_t batch_index = 0;
            for (auto& batch : *train_loader) {
                auto data = batch.data.to(device);     // move inputs to the device
                auto target = batch.target.to(device); // move labels to the device
                optimizer.zero_grad();
                auto output = model->forward(data);
                auto loss = torch::nn::functional::cross_entropy(output, target);
                loss.backward();
                optimizer.step();

                if (++batch_index % 10 == 0) { // report every 10 batches
                    std::cout << "Epoch [" << epoch + 1 << "] Batch [" << batch_index
                        << "] Loss: " << loss.item<double>() << std::endl;
                }
            }

            // Evaluate on the test set after each epoch.
            model->eval();
            size_t correct = 0;
            size_t total = 0;
            for (const auto& batch : *test_loader) {
                auto data = batch.data.to(device);
                auto target = batch.target.to(device);
                auto output = model->forward(data);
                auto pred = output.argmax(1);
                correct += pred.eq(target).sum().item<int64_t>();
                total += target.size(0);
            }
            double accuracy = static_cast<double>(correct) / total * 100.0;
            std::cout << "Epoch [" << epoch + 1 << "] Accuracy: " << accuracy << "%" << std::endl;
        }

        // Save the weights on CPU so the checkpoint also loads in CPU-only runs.
        model->to(torch::kCPU);
        // Create an output archive and serialize the model parameters plus
        // metadata (cumulative epoch count and finish time).
        torch::serialize::OutputArchive archive;
        model->save(archive);

        std::cout << "本次完成训练次数： " << EPOCH << std::endl;
        std::cout << "之前训练次数： " << epoch_count << std::endl;

        // Persist the cumulative epoch count (previous runs + this run).
        archive.write("epoch_count", torch::tensor(static_cast<int64_t>(EPOCH + epoch_count)));

        // Get the current time as a formatted string.
        std::time_t now = std::time(nullptr);
        char time_buffer[80];
        std::strftime(time_buffer, sizeof(time_buffer), "%Y-%m-%d %H:%M:%S", std::localtime(&now));
        std::string finished_time_str(time_buffer);
        // Encode the string as an int64 array (ASCII codes) because archives
        // can only store tensors.
        std::vector<int64_t> finished_time_ascii;
        for (char ch : finished_time_str) {
            finished_time_ascii.push_back(static_cast<int64_t>(ch));
        }
        archive.write("finished_time", torch::tensor(finished_time_ascii, torch::TensorOptions(torch::kInt64)));

        // Save the archive to disk.
        archive.save_to(weight_file);
        std::cout << "Model weights and metadata saved to " << std::filesystem::current_path() << weight_file << std::endl;
#endif
        // Reload the saved weights into a fresh model and run inference.
        auto inference_model = std::make_shared<Net>();
        torch::load(inference_model, weight_file);
        inference_model->to(device); // move the inference model to the device

        std::cout << "Model weights loaded for inference " << weight_file << std::endl;
        // Run inference on the local sample images ../data/0.jpg .. ../data/9.jpg.
        for (int digit = 0; digit <= 9; ++digit) {
            inference(inference_model, "../data/" + std::to_string(digit) + ".jpg");
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Runtime error: " << e.what() << std::endl;
        return -1;
    }
    return 0;
}
