// Platform / runtime headers
#include <arm_neon.h>
#include <omp.h>

// C++ standard library
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <memory>
#include <mutex>
#include <new>
#include <numeric>
#include <queue>
#include <random>
#include <utility>
#include <vector>

// Minimal C++11 allocator returning 16-byte-aligned storage, as produced by
// aligned_alloc. Used so NEON full-vector loads can assume aligned data.
template<typename T>
struct AlignedAllocator {
    using value_type = T;
    static constexpr size_t alignment = 16; // alignment guaranteed to NEON kernels

    AlignedAllocator() = default;
    template<class U> AlignedAllocator(const AlignedAllocator<U>&) {}

    // Allocates storage for n objects of T; throws std::bad_alloc on
    // overflow or allocation failure.
    T* allocate(size_t n) {
        // Guard the byte-count multiplication against overflow.
        if (n > std::numeric_limits<size_t>::max() / sizeof(T))
            throw std::bad_alloc();
        // C11/C++17 aligned_alloc requires `size` to be a multiple of the
        // alignment; the original passed n*sizeof(T) directly, which is
        // undefined behavior (and returns nullptr on some libcs) whenever
        // n*sizeof(T) % 16 != 0. Round the request up.
        size_t bytes = n * sizeof(T);
        bytes = (bytes + alignment - 1) / alignment * alignment;
        void* ptr = aligned_alloc(alignment, bytes);
        if (!ptr) throw std::bad_alloc();
        return static_cast<T*>(ptr);
    }

    void deallocate(T* p, size_t) { free(p); }
};

// AlignedAllocator is stateless, so any two instances — regardless of the
// value types they were instantiated with — are interchangeable and compare
// equal. Required by the Allocator concept.
template<typename T, typename U>
bool operator==(const AlignedAllocator<T>&, const AlignedAllocator<U>&) {
    return true;
}

template<typename T, typename U>
bool operator!=(const AlignedAllocator<T>& lhs, const AlignedAllocator<U>& rhs) {
    return !(lhs == rhs);
}

// std::vector whose backing storage is 16-byte aligned (via
// AlignedAllocator), safe to hand to the NEON kernels below.
template<typename T>
using AlignedVector = std::vector<T, AlignedAllocator<T>>;

// State for the IVF-PQ index: coarse IVF centroids with inverted lists,
// plus per-sub-space product-quantization codebooks and codes.
struct IVF_PQ_Index {
    std::vector<AlignedVector<float>> centroids;  // IVF cluster centroids, nlist entries of vecdim floats
    std::vector<std::vector<uint32_t>> lists;     // inverted lists: base-vector ids per centroid
    size_t nlist = 1024;                          // number of coarse clusters
    size_t sub_dim;                              // PQ sub-space dimensionality (set to vecdim / m at init)
    size_t m = 8;                                // number of PQ sub-spaces
    size_t ks = 256;                             // codewords per sub-space (256 so a code fits in uint8_t)
    std::vector<AlignedVector<float>> pq_codebooks; // PQ codebooks: m entries of ks*sub_dim floats, codeword-major
    std::vector<std::vector<uint8_t>> pq_codes;    // per-base-vector PQ codes: base_number entries of m bytes
    std::vector<AlignedVector<float>> centroid_norms; // centroid L2 norms (not referenced in this chunk)
    std::vector<AlignedVector<float>> pq_norms;    // codebook L2 norms (not referenced in this chunk)
    bool initialized = false;
};

// Process-wide singleton index, built lazily on first query; construction
// is guarded by init_mutex.
// NOTE(review): `initialized` is read without the mutex in openmp_ivf_pq —
// consider std::atomic<bool> to avoid a formal data race.
static IVF_PQ_Index ivf_pq_index;
static std::mutex init_mutex;

// Inner (dot) product of two float vectors using NEON SIMD.
// `dim` may be any length: a 4-wide vectorized loop handles the bulk and a
// scalar tail handles the remaining dim % 4 elements.
// NOTE(review): the asserts demand 16-byte-aligned inputs, matching the
// AlignedAllocator guarantee — but callers pass raw `base` offsets and
// sub_dim-strided slices, which are only aligned when vecdim % 4 == 0 and
// sub_dim % 4 == 0; confirm those invariants or the asserts fire in debug
// builds.
float compute_inner_product_neon(const float* x, const float* y, size_t dim) {
    assert(reinterpret_cast<uintptr_t>(x) % 16 == 0);
    assert(reinterpret_cast<uintptr_t>(y) % 16 == 0);

    // Four lane-parallel partial sums.
    float32x4_t sum = vdupq_n_f32(0.0f);
    size_t d = 0;

    // Vectorized body: fused multiply-accumulate, 4 floats per iteration.
    for (; d + 4 <= dim; d += 4) {
        float32x4_t x_vec = vld1q_f32(x + d);
        float32x4_t y_vec = vld1q_f32(y + d);
        sum = vmlaq_f32(sum, x_vec, y_vec);
    }

    // Horizontal reduction: fold 4 lanes down to 2, then to a scalar.
    float32x2_t sum2 = vadd_f32(vget_low_f32(sum), vget_high_f32(sum));
    float result = vget_lane_f32(sum2, 0) + vget_lane_f32(sum2, 1);
    
    // Scalar tail for the leftover elements.
    for (; d < dim; ++d) {
        result += x[d] * y[d];
    }
    return result;
}

// Approximate inner product between a full query vector and a PQ-encoded
// database vector: for each sub-space, take the exact inner product of the
// query chunk with the codebook centroid selected by the stored code, and
// sum across sub-spaces.
// NOTE(review): when index.sub_dim % 4 != 0 the chunk pointers are not
// 16-byte aligned and the asserts inside compute_inner_product_neon fire
// in debug builds — confirm sub_dim is a multiple of 4.
float fast_pq_inner_product(const float* query, const std::vector<uint8_t>& pq_code, 
                           const IVF_PQ_Index& index) {
    float total = 0.0f;
    for (size_t sub = 0; sub < index.m; ++sub) {
        const float* sub_query = query + sub * index.sub_dim;
        const float* centroid = index.pq_codebooks[sub].data()
                              + pq_code[sub] * index.sub_dim;
        total += compute_inner_product_neon(sub_query, centroid, index.sub_dim);
    }
    return total;
}

// 改进的聚类中心初始化 - 使用随机采样+局部优化
std::vector<size_t> fast_kmeans_init(const float* base, size_t base_number, 
                                   size_t vecdim, size_t ncentroids) {
    std::random_device rd;
    std::mt19937 gen(rd());
    
    // 1. 随机采样候选点
    const size_t sample_size = std::min<size_t>(1000, base_number);
    std::vector<size_t> samples(sample_size);
    std::iota(samples.begin(), samples.end(), 0);
    std::shuffle(samples.begin(), samples.end(), gen);
    
    // 2. 选择距离最远的点作为中心点
    std::vector<size_t> centroids;
    centroids.reserve(ncentroids);
    
    // 第一个中心点随机选择
    centroids.push_back(samples[0]);
    
    // 后续中心点选择距离已有中心最远的点
    AlignedVector<float> max_distances(sample_size, 0.0f);
    
    for (size_t i = 1; i < ncentroids; ++i) {
        #pragma omp parallel for
        for (size_t j = 0; j < sample_size; ++j) {
            float min_dist = std::numeric_limits<float>::max();
            for (size_t c = 0; c < centroids.size(); ++c) {
                float dist = compute_inner_product_neon(
                    base + samples[j] * vecdim,
                    base + centroids[c] * vecdim,
                    vecdim
                );
                dist = 1.0f / (1.0f + std::abs(dist)); // 转换为距离度量
                if (dist < min_dist) min_dist = dist;
            }
            max_distances[j] = min_dist;
        }
        
        size_t farthest = std::max_element(max_distances.begin(), max_distances.end()) - max_distances.begin();
        centroids.push_back(samples[farthest]);
    }
    
    return centroids;
}

// One-time construction of the global IVF-PQ index. Thread-safe: the whole
// build runs under init_mutex, and a second call is a no-op.
// NOTE(review): assumes vecdim is divisible by ivf_pq_index.m — otherwise
// the last vecdim % m dimensions are silently never encoded; confirm.
void init_ivf_pq(const float* base, size_t base_number, size_t vecdim) {
    std::lock_guard<std::mutex> lock(init_mutex);
    if (ivf_pq_index.initialized) return;

    ivf_pq_index.sub_dim = vecdim / ivf_pq_index.m;
    ivf_pq_index.centroids.resize(ivf_pq_index.nlist);

    // 1. Seed the coarse centroids via farthest-point sampling, then copy
    //    the chosen base vectors into aligned storage.
    std::vector<size_t> centroid_indices = fast_kmeans_init(base, base_number, vecdim, ivf_pq_index.nlist);

    #pragma omp parallel for
    for (size_t i = 0; i < ivf_pq_index.nlist; ++i) {
        ivf_pq_index.centroids[i].assign(base + centroid_indices[i] * vecdim,
                                       base + centroid_indices[i] * vecdim + vecdim);
    }

    // 2. Build the inverted lists. Assignment is deliberately approximate:
    //    each vector is compared against a 16-wide rotating window of
    //    centroids rather than all nlist of them (speed/recall trade-off).
    ivf_pq_index.lists.resize(ivf_pq_index.nlist);
    std::vector<std::mutex> list_mutexes(ivf_pq_index.nlist);

    #pragma omp parallel for
    for (size_t i = 0; i < base_number; ++i) {
        const float* vec = base + i * vecdim;
        float max_ip = -std::numeric_limits<float>::max();
        size_t best_cluster = 0;

        const size_t probe_size = std::min<size_t>(16, ivf_pq_index.nlist);
        for (size_t j = 0; j < probe_size; ++j) {
            size_t cluster_idx = (i + j) % ivf_pq_index.nlist; // round-robin window
            float ip = compute_inner_product_neon(vec,
                                               ivf_pq_index.centroids[cluster_idx].data(),
                                               vecdim);
            if (ip > max_ip) {
                max_ip = ip;
                best_cluster = cluster_idx;
            }
        }

        // One mutex per list keeps the push_back race-free without
        // serializing writes to unrelated lists.
        std::lock_guard<std::mutex> list_lock(list_mutexes[best_cluster]);
        ivf_pq_index.lists[best_cluster].push_back(i);
    }

    // 3. PQ codebooks and per-vector codes.
    ivf_pq_index.pq_codebooks.resize(ivf_pq_index.m);
    ivf_pq_index.pq_codes.resize(base_number, std::vector<uint8_t>(ivf_pq_index.m));

    #pragma omp parallel
    {
        // (The original allocated a per-thread `sub_vectors` scratch buffer
        // here that was never read or written — removed as dead code.)

        // Codebook for each sub-space k: ks randomly sampled sub-vectors,
        // flattened codeword-major (codeword j at [j*sub_dim, (j+1)*sub_dim)).
        #pragma omp for
        for (size_t k = 0; k < ivf_pq_index.m; ++k) {
            std::vector<AlignedVector<float>> cluster_samples(ivf_pq_index.ks);

            std::vector<size_t> sample_indices(ivf_pq_index.ks);
            std::iota(sample_indices.begin(), sample_indices.end(), 0);
            std::shuffle(sample_indices.begin(), sample_indices.end(), std::mt19937{std::random_device{}()});

            for (size_t j = 0; j < ivf_pq_index.ks; ++j) {
                size_t idx = sample_indices[j] % base_number; // wraps when base_number < ks
                cluster_samples[j].assign(base + idx * vecdim + k * ivf_pq_index.sub_dim,
                                        base + idx * vecdim + (k + 1) * ivf_pq_index.sub_dim);
            }

            ivf_pq_index.pq_codebooks[k].resize(ivf_pq_index.ks * ivf_pq_index.sub_dim);
            for (size_t j = 0; j < ivf_pq_index.ks; ++j) {
                std::copy_n(cluster_samples[j].data(),
                          ivf_pq_index.sub_dim,
                          ivf_pq_index.pq_codebooks[k].data() + j * ivf_pq_index.sub_dim);
            }
        }

        // Encode every base vector. Like step 2, the codeword search is
        // approximate: only a 16-wide rotating window of codewords is tried.
        #pragma omp for
        for (size_t i = 0; i < base_number; ++i) {
            const float* vec = base + i * vecdim;
            for (size_t k = 0; k < ivf_pq_index.m; ++k) {
                const float* sub_vec = vec + k * ivf_pq_index.sub_dim;

                float max_ip = -std::numeric_limits<float>::max();
                uint8_t best_codeword = 0;

                const size_t probe_size = std::min<size_t>(16, ivf_pq_index.ks);
                for (uint8_t j = 0; j < probe_size; ++j) {
                    size_t code_idx = (i + j) % ivf_pq_index.ks;
                    const float* codebook_ptr = ivf_pq_index.pq_codebooks[k].data() + code_idx * ivf_pq_index.sub_dim;
                    float ip = compute_inner_product_neon(sub_vec, codebook_ptr, ivf_pq_index.sub_dim);
                    if (ip > max_ip) {
                        max_ip = ip;
                        best_codeword = static_cast<uint8_t>(code_idx); // code_idx < ks = 256, fits
                    }
                }
                ivf_pq_index.pq_codes[i][k] = best_codeword;
            }
        }
    }

    ivf_pq_index.initialized = true;
}

// Result container returned to callers: a max-heap of (score, id) pairs
// ordered by inner product, i.e. top() is the candidate with the LARGEST
// score.
using MaxHeap = std::priority_queue<std::pair<float, uint32_t>, 
                                 std::vector<std::pair<float, uint32_t>>, 
                                 std::less<std::pair<float, uint32_t>>>;

// Approximate top-k inner-product search over the global IVF-PQ index.
// Pipeline: coarse centroid probe -> candidate gathering from the nprobe
// best inverted lists -> approximate PQ scoring -> exact NEON re-ranking of
// the best rerank_k candidates.
// Returns a max-heap (top() = best score) with up to top_k (score, id) pairs.
MaxHeap openmp_ivf_pq(const float* base, const float* query, size_t base_number, 
                          size_t vecdim, size_t top_k, size_t nprobe = 10, 
                          size_t rerank_k = 200) {
    // NOTE(review): this unsynchronized read of `initialized` races with
    // init_ivf_pq; consider std::atomic<bool>. init_ivf_pq re-checks under
    // the mutex, so the only hazard is this flag read itself.
    if (!ivf_pq_index.initialized) {
        init_ivf_pq(base, base_number, vecdim);
    }

    nprobe = std::min(nprobe, ivf_pq_index.nlist);
    rerank_k = std::min(rerank_k, static_cast<size_t>(2000)); // hard cap on exact re-rank work

    // Copy the query into aligned storage for the NEON kernel.
    AlignedVector<float> aligned_query(vecdim);
    std::copy_n(query, vecdim, aligned_query.data());

    // 1. Score every coarse centroid against the query.
    std::vector<std::pair<float, size_t>> centroid_ips(ivf_pq_index.nlist);
    #pragma omp parallel for
    for (size_t i = 0; i < ivf_pq_index.nlist; ++i) {
        centroid_ips[i] = {compute_inner_product_neon(aligned_query.data(),
                                                   ivf_pq_index.centroids[i].data(),
                                                   vecdim),
                         i};
    }

    // Partially sort so the best centroids are at the front (2*nprobe gives
    // slack without paying for a full sort).
    std::partial_sort(centroid_ips.begin(), centroid_ips.begin() + std::min(nprobe * 2, ivf_pq_index.nlist),
                     centroid_ips.end(), std::greater<std::pair<float, size_t>>());

    // 2. Gather candidates, capping each list's contribution so one huge
    //    list cannot crowd out the others.
    std::vector<uint32_t> candidates;
    candidates.reserve(nprobe * 1000);

    for (size_t i = 0; i < nprobe && candidates.size() < rerank_k * 2; ++i) {
        const auto& list = ivf_pq_index.lists[centroid_ips[i].second];
        size_t take = std::min<size_t>(list.size(), rerank_k * 2 / nprobe);
        candidates.insert(candidates.end(), list.begin(), list.begin() + take);
    }

    // 3. Approximate PQ scoring. The vector is pre-sized and written by
    //    index: the original called emplace_back from inside the parallel
    //    loop, a data race on the shared vector (corruption/crash).
    std::vector<std::pair<float, uint32_t>> approx_ips(candidates.size());

    #pragma omp parallel for
    for (size_t i = 0; i < candidates.size(); ++i) {
        uint32_t idx = candidates[i];
        approx_ips[i] = {fast_pq_inner_product(aligned_query.data(),
                                               ivf_pq_index.pq_codes[idx],
                                               ivf_pq_index),
                         idx};
    }

    // 4. Keep only the rerank_k best approximate scores, sorted descending.
    rerank_k = std::min(rerank_k, approx_ips.size());
    std::nth_element(approx_ips.begin(), approx_ips.begin() + rerank_k,
                    approx_ips.end(), std::greater<std::pair<float, uint32_t>>());
    std::sort(approx_ips.begin(), approx_ips.begin() + rerank_k,
             std::greater<std::pair<float, uint32_t>>());

    // 5. Exact re-ranking. Top-k selection requires a MIN-heap, whose top()
    //    is the weakest kept result and serves as the eviction threshold.
    //    The original compared against and popped from a MAX-heap, which
    //    evicted the BEST result every time a better one arrived.
    using MinHeap = std::priority_queue<std::pair<float, uint32_t>,
                                        std::vector<std::pair<float, uint32_t>>,
                                        std::greater<std::pair<float, uint32_t>>>;
    MinHeap selection;

    #pragma omp parallel
    {
        // Per-thread aligned scratch buffer for the exact NEON dot product
        // (replaces the original's private() clause on a class-type object).
        AlignedVector<float> aligned_vec(vecdim);

        #pragma omp for
        for (size_t i = 0; i < rerank_k; ++i) {
            uint32_t idx = approx_ips[i].second;
            aligned_vec.assign(base + idx * vecdim, base + idx * vecdim + vecdim);
            float exact_ip = compute_inner_product_neon(aligned_query.data(),
                                                     aligned_vec.data(),
                                                     vecdim);

            #pragma omp critical
            {
                if (selection.size() < top_k) {
                    selection.emplace(exact_ip, idx);
                } else if (exact_ip > selection.top().first) {
                    selection.pop(); // drop the current weakest kept result
                    selection.emplace(exact_ip, idx);
                }
            }
        }
    }

    // Repackage the selection into the MaxHeap callers expect.
    MaxHeap exact_heap;
    while (!selection.empty()) {
        exact_heap.push(selection.top());
        selection.pop();
    }
    return exact_heap;
}