#ifndef DATASET_H
#define DATASET_H

#include <iostream>
#include <vector>
#include <string>
#include <cstring>
#include <filesystem>
#include <algorithm>
#include <fstream>
#include <torch/torch.h>
#include <opencv2/opencv.hpp>


// Always-on assertion (unlike <cassert>, not disabled by NDEBUG).
// On failure it prints the message plus file/line/function context to
// stderr and aborts. The do { ... } while (0) wrapper makes the macro
// expand to a single statement, so it is safe inside unbraced if/else.
#define CUSTOM_ASSERT(condition, message) \
    do { \
        if (!(condition)) { \
            std::cerr << "Assertion failed: " << message << " | " \
                      << "File: " << __FILE__ << " | " \
                      << "Line: " << __LINE__ << " | " \
                      << "Function: " << __func__ << "\n"; \
            std::abort(); \
        } \
    } while (0)


// Character set and image geometry shared by the dataset pipeline.
struct CustInfo {
    // Every character that may appear in a label; a character's index in
    // this string is its class id (see CodeDataset::get).
    const std::string alphaNum = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+&~!@#";
    // Number of character classes. Derived from alphaNum (instead of the
    // previous hard-coded 68) so the two can never drift apart.
    int alphaNum_class = static_cast<int>(alphaNum.size());
    int max_height = 100;    // expected training image height in pixels
    int max_width = 200;     // expected training image width in pixels
};



class CodeDataset : public torch::data::Dataset<CodeDataset> {
public:

    /// Scans `basedir` (non-recursively) for .png / .jpg files.
    /// File stems are expected to look like "<label>_<anything>"; the part
    /// before the first '_' is decoded character-by-character into class
    /// indices via CustInfo::alphaNum in get().
    explicit CodeDataset(const std::string& basedir) {
        for (const auto& entry : std::filesystem::directory_iterator(basedir)) {
            const auto& path = entry.path();
            const auto ext = path.extension();
            // is_regular_file() skips subdirectories that happen to end in
            // ".png"/".jpg". Note: .string() already yields a prvalue, so the
            // previous std::move() was a no-op and has been dropped.
            if (entry.is_regular_file() && (ext == ".png" || ext == ".jpg")) {
                files.push_back(path.string());
            }
        }
        // directory_iterator order is unspecified; sort for reproducible runs.
        std::sort(files.begin(), files.end());
    }


    /// Loads one sample:
    ///   data   — float32 image tensor [3, H, W], normalized to [-1, 1]
    ///   target — 1-D int64 tensor of per-character class indices
    torch::data::Example<> get(size_t index) override {
        cv::Mat image = cv::imread(files[index], cv::IMREAD_COLOR);
        // imread returns an empty Mat on failure; catch it before the size
        // check below dereferences garbage dimensions.
        CUSTOM_ASSERT(!image.empty(), "Failed to read image: " + files[index]);
        CUSTOM_ASSERT(image.rows == cust_info.max_height && image.cols == cust_info.max_width, "Image size is not correct");
        cv::cvtColor(image, image, cv::COLOR_BGR2RGB);

        // Convert to tensor, HWC uint8 -> CHW float32 in [-1, 1].
        // from_blob borrows image.data without copying, but .to(kFloat32)
        // materializes new storage, so the result safely outlives the Mat.
        torch::Tensor tensor_image = torch::from_blob(
                image.data,
                { image.rows, image.cols, 3 },
                torch::kByte
        ).permute({ 2, 0, 1 })
                .to(torch::kFloat32)
                .div(255)
                .sub(0.5)
                .div(0.5);

        // Label = file stem up to the first '_' (whole stem if no '_').
        std::string filename = std::filesystem::path(files[index]).stem().string();
        std::string label_str = filename.substr(0, filename.find('_'));

        torch::Tensor tensor_label = torch::empty({ static_cast<int64_t>(label_str.size()) }, torch::kLong);

        for (size_t i = 0; i < label_str.size(); ++i) {
            const size_t pos = cust_info.alphaNum.find(label_str[i]);
            // Previously npos was cast straight to int64_t, silently producing
            // a huge bogus class id for any out-of-alphabet character. Fail
            // loudly instead.
            CUSTOM_ASSERT(pos != std::string::npos, "Label character not in alphabet: " + files[index]);
            tensor_label[static_cast<int64_t>(i)] = static_cast<int64_t>(pos);
        }

        return { tensor_image, tensor_label };
    }

    /// Number of image files discovered at construction time.
    [[nodiscard]] torch::optional<size_t> size() const override { return files.size(); }

private:
    std::vector<std::string> files;   // paths of all discovered samples
    CustInfo cust_info;               // alphabet + expected image geometry
};


/*
torch::data::Example<> collate_fn(const std::vector<torch::data::Example<>>& batch) {
	std::vector<torch::Tensor> images;
	std::vector<torch::Tensor> labels;
	std::vector<int64_t> seq_lengths;

	// 提取图像和标签
	for (auto& example : batch) {
		images.push_back(example.data);
		labels.push_back(example.target);
		seq_lengths.push_back(example.target.size(0));
	}

	// 合并图像张量 [N, C, H, W]
	auto image_tensor = torch::stack(images);

	// 合并标签张量 [N, max_seq_len]
	int64_t max_len = *std::max_element(seq_lengths.begin(), seq_lengths.end());
	auto label_tensor = torch::zeros({ static_cast<int64_t>(batch.size()), max_len }, torch::kLong);

	for (int i = 0; i < batch.size(); ++i) {
		label_tensor[i].slice(0, 0, labels[i].size(0)) = labels[i];
	}

	return { image_tensor, label_tensor };
}
*/


//// 定义单个样本类型和批次类型
//using InputBatchType = std::vector<torch::data::Example<>>;     // 输入批次类型（多个样本的集合）
//using OutputBatchType = torch::data::Example<>;                  // 输出批次类型（合并后的张量）
//
//struct CollateFn : public torch::data::transforms::BatchTransform<InputBatchType, OutputBatchType> {
//    // 必须定义的类型别名
//    using InputBatchType = std::vector<torch::data::Example<>>;
//    using OutputBatchType = torch::data::Example<>;
//
//    // 实现基类要求的接口
//    OutputBatchType apply_batch(InputBatchType input_batch) override {
//        std::vector<torch::Tensor> images;
//        std::vector<torch::Tensor> labels;
//        std::vector<int64_t> seq_lengths;
//
//        for (auto& example : input_batch) {
//            images.push_back(example.data);
//            labels.push_back(example.target);
//            seq_lengths.push_back(example.target.size(0));
//        }
//
//        // 合并图像 [N, C, H, W]
//        auto image_tensor = torch::stack(images);
//
//        // 合并标签（带填充）
//        int64_t max_len = 0;
//        for (auto& label : labels) {
//            max_len = std::max(max_len, label.size(0));
//        }
//        /*int64_t max_len = *std::max_element(seq_lengths.begin(), seq_lengths.end());*/
//        auto label_tensor = torch::zeros({ static_cast<int64_t>(labels.size()), max_len }, torch::kLong);
//
//        for (int i = 0; i < labels.size(); ++i) {
//            label_tensor[i].slice(0, 0, labels[i].size(0)) = labels[i];
//        }
//
//        /*auto seq_lengths_tensor = torch::tensor(seq_lengths, torch::kInt32);*/
//        /*return { image_tensor, label_tensor, seq_lengths_tensor }*/
//        return { image_tensor, label_tensor }; // 返回合并后的批次
//    }
//};



// One collated batch. Unlike torch::data::Example<>, this also carries the
// original (unpadded) length of each label sequence, so downstream code can
// mask the zero padding added by CollateFn::apply_batch.
struct CustomExample {
    torch::Tensor data;            // stacked images, [N, C, H, W]
    torch::Tensor target;          // labels zero-padded to [N, max_seq_len]
    torch::Tensor target_lengths;  // int32 unpadded label lengths, [N]
};


/// Collates a vector of (image, label) examples into one CustomExample:
/// images are stacked into [N, C, H, W], the variable-length labels are
/// zero-padded to the batch maximum, and the original label lengths are
/// preserved so the padding can be masked later (e.g. by a CTC-style loss
/// — confirm with the training loop).
struct CollateFn : public torch::data::transforms::BatchTransform<std::vector<torch::data::Example<>>, CustomExample> {
    // Type aliases required by the BatchTransform interface.
    using InputBatchType = std::vector<torch::data::Example<>>;
    using OutputBatchType = CustomExample;

    OutputBatchType apply_batch(InputBatchType input_batch) override {
        // Guard: std::max_element on an empty range below would dereference
        // end() — undefined behavior. Fail loudly instead.
        CUSTOM_ASSERT(!input_batch.empty(), "apply_batch called with an empty batch");

        std::vector<torch::Tensor> images;
        std::vector<torch::Tensor> labels;
        std::vector<int64_t> seq_lengths;
        images.reserve(input_batch.size());
        labels.reserve(input_batch.size());
        seq_lengths.reserve(input_batch.size());

        // input_batch is taken by value, so moving the tensors out (instead of
        // the previous copies) is safe and avoids N refcount bumps.
        for (auto& example : input_batch) {
            images.push_back(std::move(example.data));
            labels.push_back(std::move(example.target));
            seq_lengths.push_back(labels.back().size(0));
        }

        // Merge images into [N, C, H, W].
        auto image_tensor = torch::stack(images);

        // Zero-pad labels to [N, max_seq_len].
        const int64_t max_len = *std::max_element(seq_lengths.begin(), seq_lengths.end());
        auto label_tensor = torch::zeros({ static_cast<int64_t>(labels.size()), max_len }, torch::kLong);

        for (size_t i = 0; i < labels.size(); ++i) {
            label_tensor[static_cast<int64_t>(i)].slice(0, 0, labels[i].size(0)) = labels[i];
        }

        // Keep the true (unpadded) sequence lengths as an int32 tensor.
        auto seq_lengths_tensor = torch::tensor(seq_lengths, torch::kInt32);

        return { image_tensor, label_tensor, seq_lengths_tensor };
    }
};

#endif


