// C system headers
#include <sys/time.h>

// C++ standard library
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <vector>

// Parallel runtimes
#include <omp.h>
#include <mpi.h>

// Project headers
#include "hnswlib/hnswlib/hnswlib.h"
#include "flat_scan.h"
// 可以自行添加需要的头文件
//#include "simd_flat.h"
//#include "neiji_neon.h"
//#include "simd_pq.h"
//#include "pthread_ivf_pq.h"
//#include "pthread_pq_ivf.h"
//#include "openmp_ivf_pq.h"
//#include "openmp_pq_search.h"
#include "mpi_ivf_pq_search.h"

using namespace hnswlib;

/// Loads a binary vector file laid out as:
///   [uint32 n][uint32 d][n * d values of T]
/// On success returns a 16-byte-aligned buffer owned by the caller
/// (release with free()); on any failure the whole job is aborted via
/// MPI_Abort, so the function never returns null.
///
/// @param data_path  path to the binary file
/// @param n          out: number of vectors
/// @param d          out: dimensionality of each vector
template<typename T>
T* LoadData(std::string data_path, size_t& n, size_t& d) {
    std::ifstream fin(data_path, std::ios::in | std::ios::binary);
    if (!fin) {
        std::cerr << "Failed to open " << data_path << std::endl;
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    // The header fields are 32-bit on disk. Read them into fixed-width
    // temporaries instead of the first 4 bytes of an 8-byte size_t, which
    // only worked by accident (zero-initialized callers + little-endian).
    uint32_t n32 = 0, d32 = 0;
    fin.read(reinterpret_cast<char*>(&n32), sizeof(n32));
    fin.read(reinterpret_cast<char*>(&d32), sizeof(d32));
    if (!fin) {
        std::cerr << "Failed to read header of " << data_path << std::endl;
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    n = n32;
    d = d32;

    // aligned_alloc requires size to be a multiple of the alignment;
    // round the payload size up to the next multiple of 16.
    size_t bytes = n * d * sizeof(T);
    bytes = (bytes + 15) & ~static_cast<size_t>(15);
    T* data = static_cast<T*>(aligned_alloc(16, bytes));
    if (!data) {
        std::cerr << "Memory allocation failed for " << data_path << std::endl;
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    // Read the payload one vector at a time (size_t index: n can exceed
    // INT_MAX in principle, and mixing int with size_t is a lint trap).
    for (size_t i = 0; i < n; ++i) {
        fin.read(reinterpret_cast<char*>(data + i * d), d * sizeof(T));
    }
    fin.close();
    return data;
}

// Outcome of evaluating one query against its ground-truth neighbors.
struct SearchResult {
    float recall;    // fraction of the returned top-k that appear in the ground truth
    int64_t latency; // wall-clock time for the query, in microseconds (us)
};

void build_index(float* base, size_t base_number, size_t vecdim) {
    const int efConstruction = 150;
    const int M = 16;

    HierarchicalNSW<float> *appr_alg;
    InnerProductSpace ipspace(vecdim);
    appr_alg = new HierarchicalNSW<float>(&ipspace, base_number, M, efConstruction);

    appr_alg->addPoint(base, 0);
    #pragma omp parallel for
    for (int i = 1; i < base_number; ++i) {
        appr_alg->addPoint(base + 1ll * vecdim * i, i);
    }

    char path_index[1024] = "files/hnsw.index";
    appr_alg->saveIndex(path_index);
}

int main(int argc, char *argv[]) {
    //MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, nullptr);
    //MPI_Init(&argc, &argv);


    std::cerr << "Rank ?: Entering main()" << std::endl;  // 确保 MPI 还没初始化时也能输出

    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
    if (provided < MPI_THREAD_FUNNELED) {
        std::cerr << "MPI threading support insufficient" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // 数据变量声明
    size_t test_number = 0, base_number = 0;
    size_t test_gt_d = 0, vecdim = 0;
    float *test_query = nullptr, *base = nullptr;
    int *test_gt = nullptr;

    // 仅主进程加载数据
    if (rank == 0) {
    std::string data_path = "/anndata/";
    test_query = LoadData<float>(data_path + "DEEP100K.query.fbin", test_number, vecdim);
    test_gt = LoadData<int>(data_path + "DEEP100K.gt.query.100k.top100.bin", test_number, test_gt_d);
    base = LoadData<float>(data_path + "DEEP100K.base.100k.fbin", base_number, vecdim);
    test_number = 2000; // 限制查询数量
    }

    // 先广播维度信息
    size_t dims[4] = {test_number, base_number, vecdim, test_gt_d};
    MPI_Bcast(dims, 4, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);
    test_number = dims[0]; base_number = dims[1];
    vecdim = dims[2]; test_gt_d = dims[3];

    // ====== 在这里添加内存分配检查 ======
    if (rank != 0) {
        test_query = static_cast<float*>(aligned_alloc(16, test_number * vecdim * sizeof(float)));
        if (!test_query) {
            std::cerr << "Rank " << rank << ": Failed to allocate test_query" << std::endl;
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
        test_gt = new int[test_number * test_gt_d];
        base = static_cast<float*>(aligned_alloc(16, base_number * vecdim * sizeof(float)));
        if (!base) {
            std::cerr << "Rank " << rank << ": Failed to allocate base" << std::endl;
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
    }

    // 广播实际数据
    MPI_Bcast(test_query, test_number * vecdim, MPI_FLOAT, 0, MPI_COMM_WORLD);
    MPI_Bcast(test_gt, test_number * test_gt_d, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(base, base_number * vecdim, MPI_FLOAT, 0, MPI_COMM_WORLD);

    // 初始化搜索结构
    initialize_mpi_ivf_pq(argc, argv, base, base_number, vecdim);

    // 搜索测试
    const size_t k = 10;
    std::vector<SearchResult> results;
    if (rank == 0) results.resize(test_number);

    for (int i = 0; i < test_number; ++i) {

        // ====== 在这里添加索引越界检查 ======
    if (i * vecdim >= test_number * vecdim) {
        std::cerr << "Rank " << rank << ": Index out of bounds at query " << i 
                  << " (i*vecdim=" << i * vecdim 
                  << ", max=" << test_number * vecdim << ")" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

        MPI_Barrier(MPI_COMM_WORLD);
        double start = MPI_Wtime();

        if (!test_query || !base || vecdim == 0) {
            std::cerr << "Invalid input to mpi_ivf_pq_search" << std::endl;
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }

        auto res = mpi_ivf_pq_search(test_query + i * vecdim, base, vecdim, k);

        MPI_Barrier(MPI_COMM_WORLD);
        int64_t latency = static_cast<int64_t>((MPI_Wtime() - start) * 1e6);

        if (rank == 0) {
            std::set<uint32_t> gt(test_gt + i * test_gt_d, test_gt + (i + 1) * test_gt_d);
            size_t correct = std::count_if(res.begin(), res.end(), 
                [&](const auto& p) { return gt.count(p.second) > 0; });
            results[i] = {static_cast<float>(correct) / k, latency};
        }
    }

    if (rank == 0 && results.size() < test_number) {
        std::cerr << "Results vector too small" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    // 结果统计
    if (rank == 0) {
        float avg_recall = 0, avg_latency = 0;
        for (const auto& r : results) {
            avg_recall += r.recall;
            avg_latency += r.latency;
        }
        std::cout << "Average Recall: " << avg_recall/test_number << "\n"
                  << "Average Latency (us): " << avg_latency/test_number << std::endl;
    }

    // 清理资源
    if (test_query) free(test_query);  // aligned_alloc需要用free
    if (base) free(base);
    delete[] test_gt;  // new[]分配的用delete[]
    MPI_Finalize();
    return 0; 
}