//
// Created by SongpingWang on 2025/2/8.
//

#ifndef YOLOV11_CPP_DATASET_H
#define YOLOV11_CPP_DATASET_H


#include <torch/torch.h>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc.hpp>

#include <algorithm>
#include <cmath>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <map>
#include <mutex>
#include <random>
#include <string>
#include <tuple>
#include <unordered_map>
#include <vector>


class Albumentations {
public:
    Albumentations() {
        // Initialize random number generator
        thread_local std::random_device rd;
        gen = std::mt19937(rd());
    }

    std::tuple<cv::Mat, std::vector<std::vector<float>>, std::vector<int>> operator()(
            cv::Mat &image,
            const std::vector<std::vector<float>> &boxes,
            const std::vector<int> &class_labels) {

        std::uniform_real_distribution<> dis(0.0, 1.0);

        if (dis(gen) < 0.01) {
            cv::GaussianBlur(image, image, cv::Size(0, 0), 3);
        }

        if (dis(gen) < 0.01) {
            cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(2.0, cv::Size(8, 8));
            cv::Mat lab_image;
            cv::cvtColor(image, lab_image, cv::COLOR_BGR2Lab);
            std::vector<cv::Mat> lab_planes(3);
            cv::split(lab_image, lab_planes);
            clahe->apply(lab_planes[0], lab_planes[0]);
            cv::merge(lab_planes, lab_image);
            cv::cvtColor(lab_image, image, cv::COLOR_Lab2BGR);
        }

        if (dis(gen) < 0.01) {
            cv::cvtColor(image, image, cv::COLOR_BGR2GRAY);
            cv::cvtColor(image, image, cv::COLOR_GRAY2BGR);
        }

        if (dis(gen) < 0.01) {
            cv::medianBlur(image, image, 3);
        }

        return std::make_tuple(image, boxes, class_labels);
    }

private:
    std::mt19937 gen;
};


// Map-style detection dataset: loads images + YOLO-format labels, optionally
// applying augmentation (mosaic, HSV, random perspective, MixUp — see the
// free functions below). Definitions of the declared methods live elsewhere.
class CustomDataset : public torch::data::datasets::Dataset<CustomDataset> {
public:
    // filenames: image paths; input_size: square network input size in pixels;
    // params: augmentation hyper-parameters (keys like "hsv_h", "degrees", ...);
    // augment: enable training-time augmentation.
    CustomDataset(const std::vector<std::string> &filenames, int input_size,
                  const std::map<std::string, double> &params, bool augment);

    // Return the (image tensor, target tensor) example at `index`.
    torch::data::Example<> get(size_t index) override;

    // Number of samples in the dataset.
    torch::optional<size_t> size() const override { return n_; }

private:
    // Load image `index` from disk; returns the image and its original (h, w).
    std::tuple<cv::Mat, std::pair<int, int>> load_image(size_t index);

    // Build a 4-image mosaic centered on `index` plus its combined labels.
    std::tuple<cv::Mat, torch::Tensor> load_mosaic(size_t index);

    // Parse label files for all images into a path -> label-tensor map.
    static std::unordered_map<std::string, torch::Tensor> load_label(const std::vector<std::string> &filenames);

    std::vector<std::string> filenames_;                    // image paths
    int input_size_;                                        // network input size
    std::map<std::string, double> params_;                  // augmentation hyper-parameters
    bool augment_;                                          // augmentation enabled?
    std::unordered_map<std::string, torch::Tensor> labels_; // per-image labels
    size_t n_;                                              // dataset length
    std::vector<size_t> indices_;                           // shuffled/sample indices
    Albumentations albumentations_;                         // pixel-level augmenter
    std::mutex mtx_; // Mutex for thread safety
};


// Return a uniformly distributed integer in [min, max] (inclusive).
static int generateRandomNumber(int min, int max) {
    // One engine per thread: constructing a std::random_device and re-seeding
    // a fresh mt19937 on every call is slow and produces a statistically
    // weaker stream than a single long-lived engine.
    thread_local std::mt19937 engine{std::random_device{}()};

    // Distribution objects are cheap; build one per call for the given range.
    std::uniform_int_distribution<int> dist(min, max);

    return dist(engine);
}


// Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] pixel
// coordinates, where xy1 = top-left and xy2 = bottom-right.
// Columns of `x`: 0 = center x, 1 = center y, 2 = width, 3 = height (all
// normalized to [0, 1]); (w, h) is the image size and (pad_w, pad_h) the
// letterbox offset added after scaling.
// `inline` is required: a non-inline definition in a header violates the ODR
// once this file is included from more than one translation unit.
inline torch::Tensor wh2xy(const torch::Tensor &x, int w = 640, int h = 640, int pad_w = 0, int pad_h = 0) {
    auto y = x.clone();
    y.select(1, 0) = w * (x.select(1, 0) - x.select(1, 2) / 2) + pad_w;  // top left x
    y.select(1, 1) = h * (x.select(1, 1) - x.select(1, 3) / 2) + pad_h;  // top left y
    y.select(1, 2) = w * (x.select(1, 0) + x.select(1, 2) / 2) + pad_w;  // bottom right x
    y.select(1, 3) = h * (x.select(1, 1) + x.select(1, 3) / 2) + pad_h;  // bottom right y
    return y;
}

// Convert nx4 boxes from pixel [x1, y1, x2, y2] (xy1 = top-left,
// xy2 = bottom-right) to normalized [x, y, w, h] given image size (w, h).
// Inverse of wh2xy (without padding).
// `inline` prevents ODR violations for this header-defined function.
inline torch::Tensor xy2wh(const torch::Tensor &x, int w, int h) {
    auto y = x.clone();
    y.select(1, 0) = ((x.select(1, 0) + x.select(1, 2)) / 2) / w;  // x center
    y.select(1, 1) = ((x.select(1, 1) + x.select(1, 3)) / 2) / h;  // y center
    y.select(1, 2) = (x.select(1, 2) - x.select(1, 0)) / w;  // width
    y.select(1, 3) = (x.select(1, 3) - x.select(1, 1)) / h;  // height
    return y;
}

// Pick a random OpenCV interpolation method for augmentation-time resizing.
// `inline` prevents ODR violations for this header-defined function.
inline int resample() {
    static const std::vector<int> choices = {
            cv::INTER_AREA,
            cv::INTER_CUBIC,
            cv::INTER_LINEAR,
            cv::INTER_NEAREST,
            cv::INTER_LANCZOS4
    };
    // One engine per thread, seeded once.
    thread_local std::random_device rd;
    thread_local std::mt19937 gen(rd());
    std::uniform_int_distribution<> dis(0, (int) choices.size() - 1);
    return choices[dis(gen)];
}

// HSV color-space augmentation: jitter hue/saturation/value by random gains
// r[i] in [1 - g, 1 + g] where g = params["hsv_h"/"hsv_s"/"hsv_v"].
// Hue wraps modulo 180 (OpenCV's 8-bit hue range is [0, 180)); saturation and
// value must SATURATE at 255 — the previous `% 256` wrapped bright pixels
// back to dark values (e.g. 433 -> 177), corrupting the augmentation.
// `inline` prevents ODR violations for this header-defined function.
inline void augment_hsv(cv::Mat &image, const std::map<std::string, double> &params) {
    double h = params.at("hsv_h");
    double s = params.at("hsv_s");
    double v = params.at("hsv_v");

    thread_local std::random_device rd;
    thread_local std::mt19937 gen(rd());
    std::uniform_real_distribution<> dis(-1.0, 1.0);

    // Per-channel random gains.
    double r[3] = {dis(gen) * h + 1, dis(gen) * s + 1, dis(gen) * v + 1};

    cv::Mat hsv;
    cv::cvtColor(image, hsv, cv::COLOR_BGR2HSV);
    std::vector<cv::Mat> channels(3);
    cv::split(hsv, channels);

    // Apply the gains through 256-entry lookup tables.
    for (int i = 0; i < 3; ++i) {
        cv::Mat lut(1, 256, CV_8U);
        for (int j = 0; j < 256; ++j) {
            double scaled = j * r[i];
            int val;
            if (i == 0) {
                // Hue is circular: wrap into [0, 180).
                val = static_cast<int>(scaled) % 180;
            } else {
                // Saturation/value clip to [0, 255] instead of wrapping.
                val = static_cast<int>(std::min(scaled, 255.0));
            }
            lut.at<uchar>(j) = static_cast<uchar>(std::max(0, val));
        }
        cv::LUT(channels[i], lut, channels[i]);
    }

    cv::merge(channels, hsv);
    cv::cvtColor(hsv, image, cv::COLOR_HSV2BGR);
}

// Letterbox resize: scale the image to fit a square of side `input_size`
// (never upscaling unless augmenting), then pad with black borders.
// Returns (padded image, (ratio, ratio), (left pad, top pad)).
// Fix: the old code padded `(input_size - new)/2` on BOTH sides, so an odd
// remainder produced an output one pixel smaller than input_size; the right
// and bottom borders now absorb the remainder.
// `inline` prevents ODR violations for this header-defined function.
inline std::tuple<cv::Mat, std::pair<double, double>, std::pair<double, double>>
resize(cv::Mat &image, int input_size, bool augment) {
    int h = image.rows;
    int w = image.cols;

    // Scale ratio (new / old); at eval time never scale up past 1.0.
    double r = std::min(static_cast<double>(input_size) / h, static_cast<double>(input_size) / w);
    if (!augment) {
        r = std::min(r, 1.0);
    }

    int new_w = static_cast<int>(std::round(w * r));
    int new_h = static_cast<int>(std::round(h * r));
    int pad_left = (input_size - new_w) / 2;
    int pad_top = (input_size - new_h) / 2;
    int pad_right = input_size - new_w - pad_left;   // absorbs odd remainder
    int pad_bottom = input_size - new_h - pad_top;   // absorbs odd remainder

    cv::Mat resized;
    if (w != new_w || h != new_h) {
        // Random interpolation during augmentation, bilinear otherwise.
        cv::resize(image, resized, cv::Size(new_w, new_h), 0, 0, augment ? resample() : cv::INTER_LINEAR);
    } else {
        resized = image.clone();
    }

    cv::copyMakeBorder(resized, image, pad_top, pad_bottom, pad_left, pad_right,
                       cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));

    return std::make_tuple(image, std::make_pair(r, r),
                           std::make_pair(static_cast<double>(pad_left), static_cast<double>(pad_top)));
}

// Filter augmented boxes: keep a box only if it is wider/taller than 2 px,
// retains > 10% of its pre-augmentation area, and has aspect ratio < 100.
// box1 / box2 are 4xN tensors [x1; y1; x2; y2] before / after augmentation.
// Returns a boolean mask of length N.
// `inline` prevents ODR violations for this header-defined function.
inline torch::Tensor candidates(const torch::Tensor &box1, const torch::Tensor &box2) {
    auto w1 = box1.select(0, 2) - box1.select(0, 0);
    auto h1 = box1.select(0, 3) - box1.select(0, 1);
    auto w2 = box2.select(0, 2) - box2.select(0, 0);
    auto h2 = box2.select(0, 3) - box2.select(0, 1);

    // 1e-16 guards against division by zero for degenerate boxes.
    auto aspect_ratio = torch::max(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16));
    return (w2 > 2) & (h2 > 2) & (w2 * h2 / (w1 * h1 + 1e-16) > 0.1) & (aspect_ratio < 100);
}

// Random affine augmentation: rotation, scale, shear and translation applied
// to the image and its labels. `label` is an Nx5 tensor [class, x1, y1, x2, y2]
// in pixel coordinates; it is filtered and updated in place and also returned.
// Fixes vs. the previous version:
//  * 2x3 affine matrices cannot be chained with operator* (dimension
//    mismatch); composition is now done in homogeneous 3x3 form.
//  * min/max were reduced over the box dimension instead of the 4 corners.
//  * torch::cat on 1-D tensors along dim 1 is invalid; torch::stack builds
//    the Nx4 box tensor.
//  * x-coordinates are clamped to [0, w] and y-coordinates to [0, h]
//    (previously (x1, y1) were clamped to w and (x2, y2) to h).
// `inline` prevents ODR violations for this header-defined function.
inline std::tuple<cv::Mat, torch::Tensor> random_perspective(
        cv::Mat &image,
        torch::Tensor &label,
        const std::map<std::string, double> &params,
        std::pair<int, int> border = {0, 0}) {
    // Output canvas size (border is negative when cropping a mosaic).
    int h = image.rows + border.first * 2;
    int w = image.cols + border.second * 2;

    thread_local std::random_device rd;
    thread_local std::mt19937 gen(rd());
    std::uniform_real_distribution<> dis(0.0, 1.0);

    // Draw random transform parameters from the configured ranges.
    double angle = (dis(gen) * 2 - 1) * params.at("degrees");
    double scale = (dis(gen) * 2 - 1) * params.at("scale") + 1;
    double shear_x = (dis(gen) * 2 - 1) * params.at("shear") * M_PI / 180;
    double shear_y = (dis(gen) * 2 - 1) * params.at("shear") * M_PI / 180;
    double translate_x = (dis(gen) * 2 - 1) * params.at("translate") * w;
    double translate_y = (dis(gen) * 2 - 1) * params.at("translate") * h;

    // Compose shear + translation with rotation/scale in homogeneous form,
    // then drop the last row to obtain the 2x3 matrix warpAffine expects.
    cv::Mat shear_translate = (cv::Mat_<double>(3, 3) <<
            1, std::tan(shear_x), translate_x,
            std::tan(shear_y), 1, translate_y,
            0, 0, 1);
    cv::Mat rotation = cv::Mat::eye(3, 3, CV_64F);
    cv::getRotationMatrix2D(cv::Point2f(0, 0), angle, scale)
            .copyTo(rotation(cv::Rect(0, 0, 3, 2)));
    cv::Mat matrix = (shear_translate * rotation)(cv::Rect(0, 0, 3, 2)).clone();

    cv::warpAffine(image, image, matrix, cv::Size(w, h), cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));

    if (label.numel() > 0) {
        int64_t n = label.size(0);
        // Homogeneous coordinates of the four corners of every box:
        // (x1,y1), (x2,y2), (x1,y2), (x2,y1).
        torch::Tensor xy = torch::ones({n * 4, 3});
        xy.slice(1, 0, 2) = label.index(
                {torch::indexing::Slice(), torch::tensor({1, 2, 3, 4, 1, 4, 3, 2}, torch::kInt64)}).reshape({-1, 2});
        // Transform the corners and regroup as n x 8 (x,y interleaved).
        xy = xy.matmul(torch::from_blob(matrix.ptr<double>(), {2, 3}, torch::kDouble).transpose(0, 1).to(
                torch::kFloat32)).reshape({-1, 8});

        torch::Tensor x = xy.slice(1, 0, 8, 2);  // n x 4 corner x-coordinates
        torch::Tensor y = xy.slice(1, 1, 8, 2);  // n x 4 corner y-coordinates
        // Axis-aligned bounding box of the transformed corners: n x 4.
        torch::Tensor box = torch::stack({std::get<0>(x.min(1)),
                                          std::get<0>(y.min(1)),
                                          std::get<0>(x.max(1)),
                                          std::get<0>(y.max(1))}, 1);

        box.slice(1, 0, 4, 2).clamp_(0, w);  // x1, x2 -> [0, w]
        box.slice(1, 1, 4, 2).clamp_(0, h);  // y1, y2 -> [0, h]

        // Discard boxes that became too small / degenerate.
        auto indices = candidates(label.slice(1, 1, 5).t() * scale, box.t());
        label = label.index({indices});
        label.slice(1, 1, 5) = box.index({indices});
    }

    return std::make_tuple(image, label);
}


// Draw one sample from Beta(alpha, beta) via two gamma variates
// (X ~ Gamma(alpha), Y ~ Gamma(beta) => X / (X + Y) ~ Beta(alpha, beta)).
// Used by mix_up() below for the blending coefficient.
// `inline` prevents ODR violations for this header-defined function; the RNG
// is thread_local so each call no longer pays for re-seeding a fresh engine.
inline double sample_beta(double alpha, double beta) {
    thread_local std::random_device rd;
    thread_local std::mt19937 gen(rd());

    std::gamma_distribution<> gamma_alpha(alpha, 1.0);
    std::gamma_distribution<> gamma_beta(beta, 1.0);

    double x = gamma_alpha(gen);
    double y = gamma_beta(gen);

    return x / (x + y);
}

// MixUp augmentation: blend two images with a Beta(32, 32)-distributed weight
// and concatenate their label tensors. Both images must share size and type
// (cv::addWeighted requires it).
// `inline` prevents ODR violations for this header-defined function.
inline std::tuple<cv::Mat, torch::Tensor> mix_up(
        const cv::Mat &image1,
        const torch::Tensor &label1,
        const cv::Mat &image2,
        const torch::Tensor &label2) {
    // Beta(32, 32) concentrates near 0.5, so both images stay visible.
    double alpha = sample_beta(32.0, 32.0);
    cv::Mat image;
    cv::addWeighted(image1, alpha, image2, 1 - alpha, 0, image);
    auto label = torch::cat({label1, label2}, 0);
    return std::make_tuple(image, label);
}


#endif //YOLOV11_CPP_DATASET_H
