#ifndef DATASET_NEW_H
#define DATASET_NEW_H

#include <iostream>
#include <vector>
#include <string>
#include <cstring>
#include <filesystem>
#include <algorithm>
#include <fstream>
#include <torch/torch.h>
#include <opencv2/opencv.hpp>

#include <algorithm>
#include <random>
#include <chrono>


// Hard assertion macro: if `condition` is false, prints `message` together
// with the file, line, and enclosing function, then aborts the process.
// Unlike <cassert>'s assert(), this is always active (not disabled by NDEBUG).
// `message` is streamed with operator<<, so any streamable expression works.
// The do/while(0) wrapper makes the macro behave as a single statement
// (safe inside un-braced if/else). No comments inside the body: a // comment
// before a trailing backslash would break the line continuation.
#define CUSTOM_ASSERT(condition, message) \
    do { \
        if (!(condition)) { \
            std::cerr << "Assertion failed: " << message << " | " \
                      << "File: " << __FILE__ << " | " \
                      << "Line: " << __LINE__ << " | " \
                      << "Function: " << __func__ << "\n"; \
            std::abort(); \
        } \
    } while (0)


// Shared dataset configuration: the recognizable character alphabet and the
// fixed dimensions every training image is required to have.
struct CUSTOM_INFO {
    // Label alphabet: a character's class index is its position in this
    // string. Note index 0 is the space character (also used as the pad
    // value in CollateFn — presumably the blank/pad id; confirm in training).
    const std::string alphaNum = " 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+&~!@#";
    int max_height = 100;    // train image.height
    int max_width = 200;     // train image.width
};


// Map-style dataset of fixed-size PNG text images whose label is encoded in
// the filename stem as "<label>_<suffix>". Each sample yields a normalized
// float image tensor [3, H, W] in [-1, 1] and an int64 label-index tensor.
class OCRDataset : public torch::data::Dataset<OCRDataset> {
public:
    /// Scans `root_dir` (non-recursively) for ".png" files and builds the
    /// sample list. Files whose stem has no underscore, whose label part is
    /// empty, or whose label contains a character outside
    /// CUSTOM_INFO::alphaNum are skipped silently.
    explicit OCRDataset(const std::string &root_dir) {
        for (const auto &entry: std::filesystem::directory_iterator(root_dir)) {
            if (entry.path().extension() != ".png") { continue; }

            std::string filename = entry.path().stem().string();

            // Label is everything before the LAST underscore of the stem
            // (the trailing part is treated as a uniquifying suffix).
            size_t last_underscore = filename.find_last_of('_');
            if (last_underscore == std::string::npos) { continue; }
            std::string label_str = filename.substr(0, last_underscore);
            if (label_str.empty()) { continue; }

            // Convert label characters to alphabet indices; a character
            // outside the alphabet throws, and the file is skipped.
            try {
                samples_.emplace_back(entry.path().string(), string_to_indices(label_str));
            } catch (const std::exception &) {
                continue;
            }
        }

        // Shuffle once at construction so every epoch iterates the same
        // (randomized) order.
        unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
        std::default_random_engine rng(seed);
        std::shuffle(samples_.begin(), samples_.end(), rng);
    }

    /// Loads and normalizes sample `index`.
    /// Aborts (via CUSTOM_ASSERT) if the image cannot be read or does not
    /// match the expected max_height x max_width dimensions.
    torch::data::Example<> get(size_t index) override {
        const std::string &img_path = samples_[index].first;

        cv::Mat image = cv::imread(img_path, cv::IMREAD_COLOR);
        // Check the read explicitly: a missing/corrupt file yields an empty
        // Mat (0x0), which would otherwise trip the size assert below with a
        // misleading "Image size is not correct" message.
        CUSTOM_ASSERT(!image.empty(), ("Failed to read image: " + img_path));
        CUSTOM_ASSERT(image.rows == cust_info.max_height && image.cols == cust_info.max_width,
                      "Image size is not correct");
        cv::cvtColor(image, image, cv::COLOR_BGR2RGB);

        // HWC uint8 -> CHW float32, scaled to [0,1] then normalized to
        // [-1,1]. from_blob aliases the Mat's buffer, but .to(kFloat32)
        // materializes an owning copy, so the result does not dangle.
        torch::Tensor tensor_image = torch::from_blob(
                image.data,
                {image.rows, image.cols, 3},
                torch::kByte
        ).permute({2, 0, 1})
                .to(torch::kFloat32)
                .div(255)
                .sub(0.5)
                .div(0.5);

        // Widen the stored int indices to int64 and copy into an owning
        // tensor; clone() detaches it from the short-lived `label` vector.
        std::vector<int64_t> label(samples_[index].second.begin(), samples_[index].second.end());
        torch::Tensor tensor_label = torch::from_blob(
                label.data(), {static_cast<int64_t>(label.size())}, torch::kInt64).clone();

        return {tensor_image, tensor_label};
    }

    /// Number of usable samples discovered under root_dir.
    [[nodiscard]] torch::optional<size_t> size() const override {
        return samples_.size();
    }

private:
    // (image path, label character indices) pairs, shuffled once in the ctor.
    std::vector<std::pair<std::string, std::vector<int>>> samples_;
    CUSTOM_INFO cust_info;

    /// Maps each character of `str` to its index in CUSTOM_INFO::alphaNum.
    /// @throws std::runtime_error on a character not present in the alphabet.
    [[nodiscard]] std::vector<int> string_to_indices(const std::string &str) const {
        std::vector<int> indices;
        indices.reserve(str.size());
        for (char c: str) {
            size_t pos = cust_info.alphaNum.find(c);
            if (pos == std::string::npos) {
                throw std::runtime_error("Invalid character in label: " + std::string(1, c));
            }
            indices.push_back(static_cast<int>(pos));
        }
        return indices;
    }

};


// Collated batch produced by CollateFn.
struct CustomExample {
    torch::Tensor data;             // stacked images [N, C, H, W]
    torch::Tensor target;           // zero-padded labels [N, max_len], int64
    torch::Tensor target_lengths;   // unpadded length of each label [N], int64
};


// Batch transform that collates variable-length (image, label) examples:
// images are stacked, labels are right-padded to the batch maximum, and the
// original label lengths are kept — the shape expected by losses such as CTC.
struct CollateFn : public torch::data::transforms::BatchTransform<std::vector<torch::data::Example<>>, CustomExample> {
    // Type aliases required by the BatchTransform base interface.
    using InputBatchType = std::vector<torch::data::Example<>>;
    using OutputBatchType = CustomExample;

    /// Collates `input_batch` into a single CustomExample.
    /// @return data [N, C, H, W]; target [N, max_len] padded with 0;
    ///         target_lengths [N] with each label's true length.
    /// @throws std::runtime_error on an empty batch, an undefined tensor,
    ///         a non-3D image tensor, or a device mismatch.
    OutputBatchType apply_batch(InputBatchType input_batch) override {
        if (input_batch.empty()) {
            throw std::runtime_error("CollateFn received empty batch");
        }

        // One entry per example; reserve up front to avoid repeated
        // reallocation while filling.
        std::vector<torch::Tensor> images;
        std::vector<torch::Tensor> labels;
        std::vector<int64_t> seq_lengths;
        images.reserve(input_batch.size());
        labels.reserve(input_batch.size());
        seq_lengths.reserve(input_batch.size());

        for (auto &example: input_batch) {
            if (!example.data.defined() || !example.target.defined()) {
                throw std::runtime_error("Invalid example with undefined tensor");
            }
            if (example.data.sizes().size() != 3) {
                throw std::runtime_error("Image tensor must be 3-dimensional [C, H, W]");
            }
            images.push_back(example.data);
            labels.push_back(example.target);
            seq_lengths.push_back(example.target.size(0));
        }

        // Stack images into [N, C, H, W].
        auto image_tensor = torch::stack(images);

        // Right-pad each label with 0 up to the batch's longest label.
        // NOTE(review): pad value 0 is also the index of ' ' in
        // CUSTOM_INFO::alphaNum — confirm 0 is the intended blank/pad id.
        int64_t max_len = *std::max_element(seq_lengths.begin(), seq_lengths.end());
        std::vector<torch::Tensor> padded_labels;
        padded_labels.reserve(labels.size());
        for (auto &label: labels) {
            int64_t pad_size = max_len - label.size(0);
            auto padded = torch::constant_pad_nd(label, {0, pad_size}, 0);
            padded_labels.push_back(padded);
        }
        auto label_tensor = torch::stack(padded_labels);                  // [N, max_len]

        // Unpadded label lengths as an int64 tensor.
        auto seq_lengths_tensor = torch::tensor(seq_lengths, torch::kInt64);

        if (image_tensor.device() != label_tensor.device()) {
            throw std::runtime_error("Tensors are on different devices");
        }

        return {image_tensor, label_tensor, seq_lengths_tensor}; // collated batch
    }
};

#endif


