#include <algorithm>  // std::sort, std::min
#include <iostream>
#include <memory>     // std::make_shared
#include <utility>    // std::pair
#include <vector>

#include <torch/torch.h>

// 前馈神经网络模型
// A simple 3-layer feed-forward regressor: input -> hidden -> hidden -> output,
// with ReLU activations between the linear layers (no activation on the output).
struct FeedForwardNN : torch::nn::Module {
    torch::nn::Linear fc1{nullptr}, fc2{nullptr}, fc3{nullptr};

    // Registers the three linear layers so their parameters are tracked
    // by the module (and therefore visible to the optimizer).
    FeedForwardNN(int input_size, int hidden_size, int output_size) {
        fc1 = register_module("fc1", torch::nn::Linear(input_size, hidden_size));
        fc2 = register_module("fc2", torch::nn::Linear(hidden_size, hidden_size));
        fc3 = register_module("fc3", torch::nn::Linear(hidden_size, output_size));
    }

    // Forward pass: fc3(relu(fc2(relu(fc1(x))))).
    torch::Tensor forward(torch::Tensor x) {
        return fc3(torch::relu(fc2(torch::relu(fc1(x)))));
    }
};

// 生成归一化训练数据集
void generate_data(std::vector<float>& keys, std::vector<int>& positions, int num_samples) {
    keys.reserve(num_samples);
    positions.reserve(num_samples);
    for (int i = 0; i < num_samples; ++i) {
        keys.push_back(static_cast<float>(i) / num_samples);  // 归一化到[0, 1]范围
        positions.push_back(i);  // position 就是 key 的下标
    }
}

// Partition the (key, prediction) pairs — assumed sorted by prediction — into
// N contiguous buckets and return each bucket's [start, end] prediction range.
//
// Edge cases (previously undefined behavior):
//  - empty input or N <= 0 returns an empty vector;
//  - N is clamped to the number of predictions, so interval_size is never 0
//    (a zero interval_size made index (i+1)*0 - 1 == -1 go out of bounds).
// For the normal case (N <= size), output is identical to the original logic.
std::vector<std::pair<float, float>> divide_into_intervals(
        const std::vector<std::pair<float, float>>& key_predictions, int N) {

    std::vector<std::pair<float, float>> intervals;
    const int num_predictions = static_cast<int>(key_predictions.size());
    if (num_predictions == 0 || N <= 0) {
        return intervals;
    }

    const int buckets = std::min(N, num_predictions);  // >= 1 element per bucket
    const int interval_size = num_predictions / buckets;

    intervals.reserve(buckets);
    for (int i = 0; i < buckets; ++i) {
        const float start = key_predictions[i * interval_size].second;
        // The last bucket absorbs any remainder, so it ends at the final element.
        const float end = (i == buckets - 1)
                ? key_predictions.back().second
                : key_predictions[(i + 1) * interval_size - 1].second;
        intervals.push_back({start, end});
    }

    return intervals;
}

// Trains a small MLP to map normalized keys to their positions (a toy learned
// index), then sorts keys by predicted position and splits them into N ranges.
int main() {
    const int num_samples = 100;  // number of training samples
    const int num_epochs = 500;   // training epochs
    const int N = 4;              // number of intervals to divide into

    // Generate training data: keys normalized to [0, 1), positions = indices.
    std::vector<float> keys;
    std::vector<int> positions;
    generate_data(keys, positions, num_samples);

    // Convert to tensors: input (num_samples, 1), target (num_samples, 1).
    torch::Tensor input_tensor = torch::tensor(keys, torch::kFloat32).unsqueeze(1);
    torch::Tensor target_tensor = torch::tensor(positions, torch::kFloat32).unsqueeze(1);

    // Model (1 -> 64 -> 64 -> 1) and Adam optimizer with lr = 0.01.
    auto model = std::make_shared<FeedForwardNN>(1, 64, 1);
    torch::optim::Adam optimizer(model->parameters(), torch::optim::AdamOptions(0.01));

    // Training loop: full-batch MSE regression from key to position.
    for (int epoch = 0; epoch < num_epochs; ++epoch) {
        model->train();
        optimizer.zero_grad();
        auto output = model->forward(input_tensor);
        auto loss = torch::mse_loss(output, target_tensor);
        loss.backward();
        optimizer.step();

        if (epoch % 50 == 0) {
            std::cout << "Epoch [" << epoch << "/" << num_epochs << "], Loss: " << loss.item<float>() << std::endl;
        }
    }

    // Predict every key, keeping (key, prediction) pairs.
    // eval() + NoGradGuard: we are doing inference only, so switch the module
    // out of training mode and skip autograd graph construction entirely
    // (the original looped with grad tracking on, wasting memory per call).
    std::vector<std::pair<float, float>> key_predictions;
    key_predictions.reserve(keys.size());
    {
        torch::NoGradGuard no_grad;
        model->eval();
        for (const auto& key : keys) {
            auto input = torch::tensor({key}, torch::kFloat32).unsqueeze(0);  // shape (1, 1)
            auto output = model->forward(input);
            key_predictions.push_back({key, output.item<float>()});
        }
    }

    // Sort (key, prediction) pairs by predicted position.
    std::sort(key_predictions.begin(), key_predictions.end(),
              [](const std::pair<float, float>& a, const std::pair<float, float>& b) {
                  return a.second < b.second;
              });

    // Partition into N intervals of predicted values.
    std::vector<std::pair<float, float>> intervals = divide_into_intervals(key_predictions, N);

    // Report each interval's [start, end] prediction range.
    // (size_t index avoids the signed/unsigned comparison of the original.)
    std::cout << "\nDivided Intervals:\n";
    for (std::size_t i = 0; i < intervals.size(); ++i) {
        std::cout << "Interval " << i + 1 << ": [" << intervals[i].first << ", " << intervals[i].second << "]\n";
    }

    return 0;
}
