//
// Created by yk120 on 2024/3/1.
//

#include <nanofaiss/Clustering.h>
#include <nanofaiss/impl/FaissAssert.h>
#include <nanofaiss/utils/random.h>

#include <omp.h>

#include <cmath>
#include <cstdint>
#include <cstring>
#include <memory>
#include <random>
#include <vector>

namespace faiss {

// Construct a clustering instance for vectors of dimension d into k
// clusters, using default ClusteringParameters.
Clustering::Clustering(int d, int k) : d(d), k(k) {}

// Construct with explicit clustering parameters (niter, nredo, seed, ...).
Clustering::Clustering(int d, int k, const ClusteringParameters& cp)
                : ClusteringParameters(cp), d(d), k(k) {}

// Train on n raw float vectors: forwards to train_encoded with no codec,
// reinterpreting the float data as a byte buffer.
void Clustering::train(idx_t n, const float* x, Index& index) {
    train_encoded(
            n,
            reinterpret_cast<const uint8_t*>(x),
            nullptr,
            index);
}

namespace {

// sample the data from the dataset
// x_out must be managed by user
/** Randomly subsample the training set down to
 *  k * max_points_per_centroid vectors.
 *
 * The returned buffer (x_out) is allocated with new[] and ownership passes
 * to the caller (the caller wraps it in a unique_ptr).
 *
 * @return the number of vectors kept
 */
idx_t subsample_training_set(
        const Clustering &cluster,
        idx_t nx,
        const uint8_t *x,
        size_t line_size,
        uint8_t* &x_out) {
    // Random permutation of all input indices; only the first n_sample
    // entries are used below.
    std::vector<int> perm(nx);
    rand_perm(perm.data(), nx, cluster.seed);

    const idx_t n_sample = cluster.k * cluster.max_points_per_centroid;
    uint8_t *sampled = new uint8_t[n_sample * line_size];
    x_out = sampled;

    // Gather the selected vectors into the freshly allocated buffer.
#pragma omp parallel for
    for (idx_t i = 0; i < n_sample; i++) {
        const uint8_t *src = x + line_size * perm[i];
        memcpy(sampled + line_size * i, src, line_size);
    }

    return n_sample;
}

/** compute the centroid of current cluster
 * @param d                 size of the vector
 * @param k                 number of cluster
 * @param n                 number of the vector
 * @param x                 vectors
 * @param codec             decoder of the vector
 * @param idx               the nearest of every vector
 * @param histogram_idx     calculate the number of every cluster
 * @param centroids         out centroid of vector
 */
void compute_centroids(
        size_t d,
        size_t k,
        size_t n,
        const uint8_t* x,
        const Index *codec,
        const idx_t * idx,
        float *histogram_idx,
        float *centroids) {
    memset(centroids, 0, sizeof(*centroids) * d * k);

    size_t line_size = codec ? codec->sa_code_size() : d * sizeof(float);

    // parallel for dealing by centroids
#pragma omp parallel
    {
            int nt = omp_get_num_threads();
            int rank = omp_get_thread_num();

            size_t c0 = (k * rank) / nt;
            size_t c1 = (k * (rank + 1)) / nt;
            std::vector<float> decode_buffer(d);

            for (size_t i = 0; i < n; i++) {
                idx_t ci = idx[i];
                if (ci >= c0 && ci < c1) {
                    float *c = centroids + ci * d; // get the centroid of current point i
                    const float *xi;
                    if (!codec) {
                        xi = reinterpret_cast<const float*>(x + i * line_size); // get the vector of i
                    } else {
                        //TODO
                    }

                    histogram_idx[ci] += 1.0; // calculate the number of current cluster
                    for (size_t j = 0; j < d; j++) {
                        c[j] += xi[j]; // calculate the centroids
                    }
                }
            }
    };

    // calculate the finally centroids
#pragma omp parallel for
    for (idx_t ci = 0; ci < k; ci++) {
            if (histogram_idx[ci] == 0)
                continue;
            float norm = 1 / histogram_idx[ci];
            float *c = centroids + ci * d;
            for (size_t j = 0; j < d; j++)
                c[j] *= norm;
    }
}

// Relative perturbation applied when splitting a centroid in two
// (constexpr double keeps the exact arithmetic of the former macro).
constexpr double EPS = 1 / 1024.;

/** Handle empty clusters by splitting a larger one.
 *
 * For every empty cluster, a donor cluster is drawn with probability
 * proportional to its population minus 1 (so singleton clusters are never
 * split); the donor centroid is duplicated with a tiny symmetric
 * perturbation and its population is shared between the two clusters.
 *
 * @param d             dimension of the vectors
 * @param k             number of clusters
 * @param n             number of training vectors
 * @param histogram_idx population of each cluster (updated in place)
 * @param centroids     k centroids of dimension d (updated in place)
 * @return number of split operations performed
 */
int split_clusters(
        size_t d,
        size_t k,
        size_t n,
        float *histogram_idx,
        float *centroids) {

    int nsplits = 0; // int, matching the return type (was size_t: narrowing)
    std::mt19937 mt(1234);
    for (size_t ci = 0; ci != k; ci++) {
        if (histogram_idx[ci] == 0) {
            // draw the donor cluster cj by rejection sampling, weighted by
            // (population - 1) / (n - k)
            size_t cj = 0;
            while (1) {
                float p = (histogram_idx[cj] - 1.0) / (n - k);
                float r = mt() / (float)mt.max();
                if (r < p)
                    break; // found a cluster to split
                cj = (cj + 1) % k;
            }

            // copy centroid cj -> ci
            memcpy(centroids + ci * d,
                   centroids + cj * d,
                   sizeof(*centroids) * d);

            // apply a small symmetric perturbation so the two copies diverge
            for (size_t j = 0; j < d; j++) {
                if (j % 2 == 0) {
                    centroids[ci * d + j] *= 1 + EPS;
                    centroids[cj * d + j] *= 1 - EPS;
                } else {
                    centroids[ci * d + j] *= 1 - EPS;
                    centroids[cj * d + j] *= 1 + EPS;
                }
            }
            // split the population evenly between the two clusters
            histogram_idx[ci] = histogram_idx[cj] / 2;
            histogram_idx[cj] -= histogram_idx[ci];
            nsplits++;
        }
    }
    return nsplits;
}

}

/** Train the clustering on (possibly encoded) vectors.
 *
 * @param nx     number of training vectors
 * @param xin    training data; raw floats if codec == nullptr, otherwise
 *               codes of codec->sa_code_size() bytes per vector
 * @param codec  optional codec used to decode vectors (not fully supported
 *               yet, see TODOs)
 * @param index  index used for the assignment step; ends up holding the
 *               final centroids
 */
void Clustering::train_encoded(
        idx_t nx,
        const uint8_t* xin,
        const Index* codec,
        Index& index) {
    FAISS_THROW_IF_NOT(nx >= k);
    FAISS_THROW_IF_NOT(!codec || codec->d == d);
    FAISS_THROW_IF_NOT(index.d == d);

    // raw floats: reject NaN/inf values up front
    if (!codec) {
        const float *x = reinterpret_cast<const float*>(xin);
        for (size_t i = 0; i < nx * d; i++)
            FAISS_THROW_IF_NOT(std::isfinite(x[i]));
    }

    const uint8_t *x = xin;
    std::unique_ptr<uint8_t[]> del1; // owns the subsampled copy, if any

    size_t line_size = codec ? codec->sa_code_size() : sizeof(float) * d; // bytes per vector

    // if the dataset is too large, subsample it
    if (nx > k * max_points_per_centroid) {
        uint8_t *x_new; // x_new is the sampled data
        nx = subsample_training_set(
                *this, nx, x, line_size, x_new);
        del1.reset(x_new); // del1 takes ownership of x_new
        x = x_new;
    } else if (nx < k * min_points_per_centroid) {
        // TODO: warn that there are too few training points
    }

    // now x holds the (possibly subsampled) data, nx its vector count

    if (nx == k) {
        // TODO: each point is its own centroid, no iteration needed
    }

    std::unique_ptr<idx_t[]> idx(new idx_t[nx]);      // nearest centroid per vector
    std::unique_ptr<float[]> distance(new float[nx]); // distance to it

    // for similarity metrics (e.g. inner product) a larger objective is better
    bool lower_is_better = !is_similarity_metric(index.metric_type);
    float best_obj = lower_is_better ? HUGE_VALF : -HUGE_VALF;
    std::vector<float> best_centroids; // best centroids over all redos

    // TODO: input centroids are not supported

    std::vector<float> decode_buffer(codec ? d * decode_block_size : 0);

    // run k-means nredo times and keep the best result
    for (int redo = 0; redo < nredo; redo++) {
        centroids.resize(d * k);
        std::vector<int> perm(nx);
        rand_perm(perm.data(), nx, seed + 1 + redo * 15486557L);

        if (!codec) {
            // initialize centroids from k randomly chosen training vectors
            for (int i = 0; i < k; i++)
                memcpy(centroids.data() + i * d, x + perm[i] * line_size, line_size);
        } else {
            // TODO: decode the initial centroids
        }

        post_process_centroids(); // optional post-processing of the centroids

        if (index.ntotal != 0) index.reset(); // the index must start empty

        if (!index.is_trained) index.train(k, centroids.data());

        index.add(k, centroids.data()); // seed the index with the initial centroids

        //! k-means iterations

        float obj = 0;
        for (int i = 0; i < niter; i++) {
            if (!codec) {
                // assignment step: nearest centroid for every vector
                index.search(
                        nx,
                        reinterpret_cast<const float*>(x),
                        1,
                        distance.get(),
                        idx.get());
            } else {
                // TODO: decode blocks, then search
            }

            // objective = sum of distances to the assigned centroids
            obj = 0;
            for (int j = 0; j < nx; j++) {
                obj += distance[j];
            }

            std::vector<float> histogram_idx(k); // population of each cluster

            // update step: recompute centroids as the cluster means
            compute_centroids(
                    d,
                    k,
                    nx,
                    x,
                    codec,
                    idx.get(),
                    histogram_idx.data(),
                    centroids.data());

            // re-seed empty clusters by splitting populated ones
            int nsplits = split_clusters(
                    d, k, nx, histogram_idx.data(), centroids.data());
            (void)nsplits; // TODO: report in iteration stats

            post_process_centroids(); // post process the centroids
            index.reset(); // reset the index

            //TODO update index

            // add the new centroids
            index.add(k, centroids.data());
        }

        // BUGFIX: was `if (redo > 1)`, which never recorded the result of
        // the first two redos — with nredo == 2, best_centroids stayed
        // empty and the final restore below wiped the centroids.
        if (nredo > 1) {
            if ((lower_is_better && obj < best_obj) ||
                (!lower_is_better && obj > best_obj)) {
                best_centroids = centroids;
                best_obj = obj;
            }
            index.reset();
        }
    }

    // restore the overall best run
    if (nredo > 1) {
        centroids = best_centroids;
        index.reset();
        index.add(k, best_centroids.data());
    }
}

// Hook applied to the centroids after each (re)computation — presumably
// intended for e.g. L2 renormalization (spherical k-means); currently a
// no-op.
void Clustering::post_process_centroids() {
    // TODO
}


}
