/*
 * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <atomic>
#include <ascenddaemon/impl/IndexIVFSQ.h>
#include <ascenddaemon/impl/AuxIndexStructures.h>
#include <ascenddaemon/utils/Limits.h>

namespace ascend {
namespace {
// Number of codes scanned per SQ-distance operator launch; must be CUBE_ALIGN_SIZE aligned.
const int SEARCH_LIST_SIZE = 65536;
// SEARCH_LIST_SIZE expressed in cube blocks (rows of CUBE_ALIGN_SIZE).
const int SEARCH_SHAPED_SIZE = SEARCH_LIST_SIZE / CUBE_ALIGN_SIZE;
// Spin iterations between timeout checks while polling operator flags (see WAITING_FLAG_READY).
const int TIMEOUT_CHECK_TICK = 5120;
// Maximum time (ms) to wait for an aicore operator before giving up.
const double TIMEOUT_MS = 50000;
const int FLAG_ALIGN_SIZE = 32;   // dual core ops need 32 flags, while single core needs 16.
const int FLAG_ALIGN_OFFSET = 16; // core 0 uses the first 16 flags, core 1 uses the second 16 flags.
// Alignment of the per-segment "actual size" tensor passed to the operator.
const int SIZE_ALIGN_SIZE = 8;
// Thread pool size used for topk merging and residual calculation.
const int THREADS_CNT = 4;
const int SQOP_INPUT_NUM = 6;     // DistanceComputeSQ8 operator input tensor count
const int SQOP_OUTPUT_NUM = 2;    // DistanceComputeSQ8 operator output tensor count

// 0x18000000 means 384M (resource mem pool's size)
const int IVFSQ_DEFAULT_TEMP_MEM = static_cast<int>(0x18000000);
}

// Work item handed from the operator-issuing loop to a topk worker thread.
// The issuing thread publishes the operator's output buffers via SetExecuting()
// after the distance op has been put on the stream; a worker polls
// IsExecuting() and then the flag memory before consuming distPtr/idPtr.
struct QueueItem {
    QueueItem()
        : distPtr(nullptr), idPtr(nullptr), flagPtr(nullptr), flagPtrSec(nullptr), size(0), executing(false)
    {}

    QueueItem(const QueueItem &item)
    {
        distPtr = item.distPtr;
        idPtr = item.idPtr;
        flagPtr = item.flagPtr;
        // fix: flagPtrSec was not copied before, leaving copies with an
        // uninitialized second-core flag pointer.
        flagPtrSec = item.flagPtrSec;
        size = item.size;
        executing.store(item.executing.load());
    }

    QueueItem &operator = (const QueueItem &item)
    {
        if (this != &item) {
            distPtr = item.distPtr;
            idPtr = item.idPtr;
            flagPtr = item.flagPtr;
            // fix: flagPtrSec was not assigned before, leaving a stale
            // second-core flag pointer after assignment.
            flagPtrSec = item.flagPtrSec;
            size = item.size;
            executing.store(item.executing.load());
        }

        return *this;
    }

    // Publish the operator's output buffers to the worker thread.
    // dist/id: operator result memory; flag: aicore completion flags
    // (core 1's flags start FLAG_ALIGN_OFFSET entries in); s: result count.
    void SetExecuting(float16_t *dist, uint32_t *id, uint16_t *flag, int s)
    {
        ASCEND_ASSERT(dist != nullptr);
        ASCEND_ASSERT(id != nullptr);
        ASCEND_ASSERT(flag != nullptr);
        ASCEND_ASSERT(s != 0);

        distPtr = dist;
        idPtr = id;
        flagPtr = flag;
        flagPtrSec = flag + FLAG_ALIGN_OFFSET;
        size = s;
        executing = true;
    }

    inline bool IsExecuting()
    {
        return executing;
    }

    float16_t *distPtr;            // distance result mem pointer
    uint32_t *idPtr;               // ids mem pointer
    uint16_t *volatile flagPtr;    // flag mem pointer for aicore 0, first uint16_t is set to 1 when the core finishes
    uint16_t *volatile flagPtrSec; // flag mem pointer for aicore 1, first uint16_t is set to 1 when the core finishes
    int size;                      // how many codes were calculated / results fed to the topk functor
    std::atomic<bool> executing;   // whether the item has been added to the stream for executing
};

// Construct an IVF index over SQ8-encoded codes.
// numList: number of coarse (IVF) lists; dim: vector dimension, must be a
// multiple of CUBE_ALIGN_SIZE; encodeResidual: codes encode (x - centroid)
// rather than x; nprobes: number of lists scanned per query.
IndexIVFSQ::IndexIVFSQ(int numList, int dim, bool encodeResidual, int nprobes)
    : IndexIVF(numList, dim, dim, nprobes, IVFSQ_DEFAULT_TEMP_MEM),
      byResidual(encodeResidual),
      distSqOpInput(SQOP_INPUT_NUM, nullptr),
      distSqOpOutput(SQOP_OUTPUT_NUM, nullptr),
      threadPool(nullptr)
{
    ASCEND_THROW_IF_NOT(dim % CUBE_ALIGN_SIZE == 0);

    // trained values (centroids + vMin/vDiff) must be supplied before use
    isTrained = false;

    // supported batch size
    searchPageSizes = {64, 32, 16, 8, 4, 2, 1};

    // worker pool used for residual calculation and topk merging
    threadPool = new ThreadPool(THREADS_CNT);

    // one precomputed-term vector per IVF list
    for (int i = 0; i < numLists; ++i) {
        preComputeData.push_back(
            std::make_unique<DeviceVector<float>>(MemorySpace::DEVICE_HUGEPAGE));
    }
    resetDistCompOperator(numList);
    resetSqDistOperator();
}

IndexIVFSQ::~IndexIVFSQ()
{
    // delete on a null pointer is a no-op, so no null check is needed;
    // null the pointer afterwards to avoid a dangling member.
    delete threadPool;
    threadPool = nullptr;
}

// Drop the stored codes, ids and precomputed terms; trained values are kept.
void IndexIVFSQ::reset()
{
    IndexIVF::reset();

    // rebuild an empty precompute vector for every IVF list
    preComputeData.clear();
    for (int listId = 0; listId < numLists; ++listId) {
        preComputeData.emplace_back(
            std::make_unique<DeviceVector<float>>(MemorySpace::DEVICE_HUGEPAGE));
    }
}

// Pre-allocate storage for roughly numVecs vectors, spread evenly over all
// IVF lists. Per-list capacity is rounded up to the cube alignment.
void IndexIVFSQ::reserveMemory(size_t numVecs)
{
    size_t perList = utils::divUp(numVecs, static_cast<size_t>(numLists));
    if (perList < 1) {
        return;
    }

    perList = utils::roundUp(perList, static_cast<size_t>(CUBE_ALIGN_SIZE));
    IndexIVF::reserveMemory(perList * static_cast<size_t>(numLists));

    for (auto &precomp : preComputeData) {
        precomp->reserve(perList);
    }
}

// Pre-allocate storage for numVecs vectors in a single IVF list.
// numVecs is rounded up to the cube alignment.
void IndexIVFSQ::reserveMemory(int listId, size_t numVecs)
{
    // fix: validate listId BEFORE any per-list work; previously the check ran
    // after IndexIVF::reserveMemory(listId, ...) had already been called with
    // a possibly out-of-range listId (reclaimMemory(listId) checks first).
    ASCEND_THROW_IF_NOT((listId < numLists) && (listId >= 0));

    if (numVecs < 1) {
        return;
    }

    numVecs = utils::roundUp(numVecs, static_cast<size_t>(CUBE_ALIGN_SIZE));
    IndexIVF::reserveMemory(listId, numVecs);
    preComputeData[listId]->reserve(numVecs);
}

// Release unused capacity across the whole index; returns bytes reclaimed.
size_t IndexIVFSQ::reclaimMemory()
{
    size_t reclaimed = IndexIVF::reclaimMemory();

    for (auto &precomp : preComputeData) {
        reclaimed += precomp->reclaim(true);
    }

    return reclaimed;
}

// Release unused capacity of one IVF list; returns bytes reclaimed.
size_t IndexIVFSQ::reclaimMemory(int listId)
{
    ASCEND_THROW_IF_NOT((listId < numLists) && (listId >= 0));

    size_t reclaimed = IndexIVF::reclaimMemory(listId) + preComputeData[listId]->reclaim(true);
    return reclaimed;
}

// Append numVecs pre-encoded SQ8 codes to list listId, together with their
// precomputed terms (one float per vector) and caller-supplied ids.
// Codes are rearranged into the Zz layout required by the aicore operator.
void IndexIVFSQ::addVectors(int listId, size_t numVecs, const uint8_t *codes,
    const uint32_t *indices, const float *preCompute)
{
    ASCEND_THROW_IF_NOT(this->isTrained);
    ASCEND_THROW_IF_NOT(listId >= 0 && listId < numLists);

    if (numVecs == 0) {
        return;
    }

    // code needs to be Zz format because of DistanceComputeSQ8 operator's limitation.
    //       origin code for example (shape n X dim). n=16, dim = 128
    //       |  0_0  0_1  0_2  0_3 ...  0_125  0_126  0_127 |
    //       |  1_0  1_1  1_2  1_3 ...  1_125  1_126  1_127 |
    //       |        .                          .          |
    //       |        .                          .          |
    //       | 14_0 14_1 14_2 14_3 ... 14_125 14_126 14_127 |
    //       | 15_0 15_1 15_2 15_3 ... 15_125 15_126 15_127 |
    //                              | shape dims 2: (dim/16 X n/16) X (16 X 16), 
    //             after Zz format    dims4: (n/16) X (dim/16) X 16 X 16
    //       |   0_0   0_1 ...  0_14  0_15   1_0   1_1 ...  1_15 ...   15_15 |
    //       |  0_16  0_17 ...  0_30  0_31  1_16  1_17 ...  1_31 ...   15_31 |
    //       |        .                    .                  .         .    |
    //       |        .                    .                  .         .    |
    //       |  0_96  0_97 ... 0_110 0_111  1_96  1_97 ... 1_111 ...  15_111 |
    //       | 0_112 0_113 ... 0_126 0_127 1_112 1_113 ... 1_127 ...  15_127 |
    // n and dim must be 16 aligned, otherwise padding data is needed.
    AscendTensor<uint8_t, DIMS_2> codesData(const_cast<uint8_t *>(codes), { static_cast<int>(numVecs), this->dims });
    size_t originLen = getListLength(listId);
    // grow code and precompute storage to the cube-aligned new length
    size_t tmpLen = utils::roundUp((originLen + numVecs), static_cast<size_t>(CUBE_ALIGN_SIZE));
    deviceListData[listId]->resize(tmpLen * this->dims);
    preComputeData[listId]->resize(tmpLen);

    // dims is aligned with CUBE_ALIGN_SIZE, no padding data in horizontal direction
    int dimShaped = utils::divUp(this->dims, CUBE_ALIGN_SIZE);

// input codes are contiguous (numVecs X dims); reconstruct the codes into Zz format.
#pragma omp parallel for if (numVecs >= 100)
    for (size_t i = 0; i < numVecs; i++) {
        int seq = static_cast<int>(originLen + i);
        uint8_t *tmpData = static_cast<uint8_t *>(deviceListData[listId]->data()) + getShapedDataOffset(seq);

        // copy one CUBE_ALIGN_SIZE-wide chunk per cube block of the row
        for (int j = 0; j < dimShaped; j++) {
            MEMCPY_S(tmpData, CUBE_ALIGN_SIZE * sizeof(uint8_t),
                codesData[i][j * CUBE_ALIGN_SIZE].data(), CUBE_ALIGN_SIZE * sizeof(uint8_t));
            tmpData += (CUBE_ALIGN_SIZE * CUBE_ALIGN_SIZE);
        }
    }

    // append precomputed terms and ids behind the existing entries
    float *precompData = preComputeData[listId]->data() + originLen;
    MEMCPY_S(precompData, numVecs * sizeof(float), preCompute, numVecs * sizeof(float));
    deviceListIndices[listId]->append(indices, numVecs);

    // track the longest (cube-aligned) list; used to size search scratch buffers
    maxListLength = std::max(maxListLength, static_cast<int>(getListLength(listId)));
    maxListLength = utils::roundUp(maxListLength, CUBE_ALIGN_SIZE);
    this->ntotal += numVecs;
}

// Install new coarse centroids (used by the L1 search).
void IndexIVFSQ::updateCoarseCentroidsData(AscendTensor<float16_t, DIMS_2> &coarseCentroidsData)
{
    IndexIVF::updateCoarseCentroidsData(coarseCentroidsData);

    // The index is trained only once BOTH the centroids and the SQ trained
    // values are present; if vMin is already set, this completes training.
    if (this->vMin.data() != nullptr) {
        this->isTrained = true;
    }
}

// Install the SQ trained values: per-dimension minimum (vMin) and range
// (vDiff). Both must have exactly `dims` elements.
void IndexIVFSQ::updateTrainedValue(AscendTensor<float16_t, DIMS_1> &trainedMin,
    AscendTensor<float16_t, DIMS_1> &trainedDiff)
{
    const int minLen = trainedMin.getSize(0);
    const int diffLen = trainedDiff.getSize(0);
    ASCEND_THROW_IF_NOT_FMT(minLen == diffLen && minLen == this->dims,
        "sq trained data's shape invalid.(%d, %d) vs (dim:%d)", minLen, diffLen, this->dims);

    // take private copies so the caller's tensors may be released
    AscendTensor<float16_t, DIMS_1> newMin({ minLen });
    newMin.copyFromSync(trainedMin);
    vMin = std::move(newMin);

    AscendTensor<float16_t, DIMS_1> newDiff({ diffLen });
    newDiff.copyFromSync(trainedDiff);
    vDiff = std::move(newDiff);

    // The index is trained only once BOTH the centroids and the SQ trained
    // values are present; if the centroids are already set, this completes it.
    if (this->coarseCentroids.data() != nullptr) {
        this->isTrained = true;
    }
}

// Intentional no-op: this index ingests pre-encoded SQ8 codes through
// addVectors(); adding raw float16 vectors is not supported here.
void IndexIVFSQ::addImpl(int n, const float16_t *x, const idx_t *ids)
{
    VALUE_UNUSED(n);
    VALUE_UNUSED(x);
    VALUE_UNUSED(ids);
}

// Compute residuals (query - assigned coarse centroid) for every query and
// every probed list; results go to residulas[query][probe][dim].
void IndexIVFSQ::calcResiduals(AscendTensor<float16_t, DIMS_2> &query,
    AscendTensor<uint32_t, DIMS_2> &nprobeIndices, AscendTensor<float16_t, DIMS_3> &residulas)
{
    const int parallelN = 8;
    const int numQueries = query.getSize(0);
    const int dim = query.getSize(1);
    const int probes = nprobeIndices.getSize(1);
    ASCEND_ASSERT(probes == this->nprobe);
    ASCEND_ASSERT(dim == this->dims);

    // One invocation handles a single probe slot across all queries.
    auto residualFunctor = [&](int probeIdx) {
        for (int qIdx = 0; qIdx < numQueries; qIdx++) {
            int listId = nprobeIndices[qIdx][probeIdx].value();
            for (int d = 0; d < dim; d++) {
                residulas[qIdx][probeIdx][d] =
                    query[qIdx][d].value() - coarseCentroids[listId][d].value();
            }
        }
    };

    // run the residual functor on the thread pool, one task per probe
    CALL_PARALLEL_FUNCTOR_NOEXCEPTION(numQueries, parallelN, probes, threadPool, residualFunctor);
}

// L2 (fine) search: scan the codes of each query's nprobe candidate lists
// with the SQ8 distance operator and merge the per-segment results into the
// final topk. The main thread issues operators on the stream while pool
// threads consume finished segments — a producer/consumer pipeline
// synchronized through QueueItem flags and executeInfo counters.
void IndexIVFSQ::searchImplL2(AscendTensor<float16_t, DIMS_2> &queries,
                              AscendTensor<uint32_t, DIMS_2> &l1Indices,
                              AscendTensor<float16_t, DIMS_2> &outDistances,
                              AscendTensor<uint32_t, DIMS_2> &outIndices)
{
    auto stream = resources.getDefaultStream();
    auto &mem = resources.getMemoryManager();
    int n = queries.getSize(0);
    // number of fixed-size segments needed to cover the longest list
    int maxScanSeg = utils::divUp(maxListLength, SEARCH_LIST_SIZE);

    // residual calculation, query - L1 coarse centroids (only when the codes
    // were encoded by residual); otherwise search on the raw queries.
    AscendTensor<float16_t, DIMS_3> queriesL2(queries.data(), { n, 1, dims });
    if (byResidual) {
        AscendTensor<float16_t, DIMS_3> residulas(mem, { n, nprobe, dims }, stream);
        calcResiduals(queries, l1Indices, residulas);
        queriesL2 = std::move(residulas);
    }

    // tensor for operator completion flags; zeroed so workers block until set
    AscendTensor<uint16_t, DIMS_3> opFlag(mem, { n, nprobe, (maxScanSeg * FLAG_ALIGN_SIZE) }, stream);
    (void)opFlag.zero();
    // tensor telling the operator how many codes to calculate per segment
    AscendTensor<uint32_t, DIMS_3> opSize(mem, { n, nprobe, (maxScanSeg * SIZE_ALIGN_SIZE) }, stream);
    // tensor receiving the operator's SQ distance output
    AscendTensor<float16_t, DIMS_3> distResult(mem, {n, nprobe, maxListLength}, stream);

    // per-query queue of issued segments, plus (allIssued, issuedCount) state
    std::vector<std::vector<QueueItem>> topkQueue(n, std::vector<QueueItem>((maxScanSeg * nprobe)));
    std::vector<std::future<void>> topkFunctorRet;
    std::vector<std::pair<volatile bool, volatile int>> executeInfo(n, {false, 0});
    bool errorQuit = false;

    // topk functor: one instance per query, run on the thread pool
    auto topkFunctor = [&] (int idx) {
        // bind thread to fixed cpus for stable time cost;
        // bind from cpu 0 to cpu 3, cpu 4-5 is for the main thread.
        if (idx < THREADS_CNT) {
            AscendUtils::attachToCpu(idx);
        }

        auto outDistance = outDistances[idx].view();
        auto outIndice = outIndices[idx].view();
        int iter = 0;
        // keep draining until every issued segment has been merged
        while (!errorQuit && (!executeInfo[idx].first || iter < executeInfo[idx].second)) {
            while (iter < executeInfo[idx].second) {
                auto& item = topkQueue[idx][iter];

                // wait for the item's operator to be added to the stream to run
                WAITING_FLAG_READY((item.IsExecuting()), TIMEOUT_CHECK_TICK, TIMEOUT_MS);

                AscendTensor<float16_t, DIMS_1> distance(item.distPtr, {item.size});
                AscendTensor<uint32_t, DIMS_1> ids(item.idPtr, {item.size});

                // wait for the operator to finish running; both aicores set
                // their flag when done.
                WAITING_FLAG_READY((*item.flagPtr && *(item.flagPtrSec)), TIMEOUT_CHECK_TICK, TIMEOUT_MS);
                ASCEND_THROW_IF_NOT(topkOp.exec(distance, ids, outDistance, outIndice));
                iter++;
            }
        }

        // reorder the results in distance's ascending order
        if (!errorQuit) {
            topkOp.reorder(outDistance, outIndice);
        }
    };

    for (int i = 0; i < n; i++) {
        // add topkFunctor task to threadpool for async executing
        topkFunctorRet.emplace_back(threadPool->Enqueue(topkFunctor, i));
    }

    // divide n into batches of THREADS_CNT queries, because the
    // threadpool has THREADS_CNT threads (the maximum parallelism).
    int tSize = utils::divUp(n, THREADS_CNT);
    for (int i = 0; i < tSize; i++) {
        int nSize = std::min(THREADS_CNT, (n - i * THREADS_CNT));
        int nStart = i * THREADS_CNT;
        int nEnd = nStart + nSize;
        int nScans = nSize * nprobe;

        // loop sequence is q0p0 q1p0 q2p0 q3p0 ... q0pn ... qnpn, so every
        // query's operators are added to the stream in a balanced order.
        for (int j = 0; j < nScans; j++) {
            int nIdx = j % nSize + nStart;
            int probeIdx = j / nSize;
            // with residual encoding each probe has its own query; otherwise
            // the single raw query (index 0) is reused for every probe.
            AscendTensor<float16_t, DIMS_2> query(queriesL2[nIdx][(byResidual ? probeIdx : 0)].data(), {1, dims});
            int list = l1Indices[nIdx][probeIdx].value();

            // split the list's codes into several segments to run sqdistance,
            // because of the fixed-shape limitation of the aicore operator.
            int segs = utils::divUp(deviceListIndices[list]->size(), SEARCH_LIST_SIZE);
            for (int m = 0; m < segs; m++) {
                int offset = m * SEARCH_LIST_SIZE;
                uint32_t size = std::min(static_cast<uint32_t>(SEARCH_LIST_SIZE), 
                    static_cast<uint32_t>((deviceListIndices[list]->size() - offset)));

                // code is stored in Zz format, Zz format is 4 dims shaped. z's shape is 
                // 16 X 16 (AiCore cube's matrix operation size). Z's shape is (SEARCH_LIST_SIZE / 16) X (dims / 16).
                // Zz's 4 dims shape is ((SEARCH_LIST_SIZE / 16), (dims / 16), 16, 16)
                AscendTensor<uint8_t, DIMS_4> code(static_cast<uint8_t *>(deviceListData[list]->data()) + dims * offset, 
                    {SEARCH_SHAPED_SIZE, dims / CUBE_ALIGN_SIZE, CUBE_ALIGN_SIZE, CUBE_ALIGN_SIZE});
                AscendTensor<float, DIMS_1> precomp(preComputeData[list]->data() + offset, {SEARCH_LIST_SIZE});
                AscendTensor<uint16_t, DIMS_1> flag(opFlag[nIdx][probeIdx][m * FLAG_ALIGN_SIZE].data(), {FLAG_ALIGN_SIZE});
                AscendTensor<uint32_t, DIMS_1> actualSize(opSize[nIdx][probeIdx][m * SIZE_ALIGN_SIZE].data(), {SIZE_ALIGN_SIZE});
                AscendTensor<float16_t, DIMS_2> result(distResult[nIdx][probeIdx][offset].data(), {1, SEARCH_LIST_SIZE});
                actualSize[0] = size;
                runSqDistOperator(query, code, precomp, vDiff, vMin, actualSize, result, flag, stream);
                // publish the segment to this query's topk worker
                topkQueue[nIdx][executeInfo[nIdx].second].SetExecuting(result.data(), 
                    deviceListIndices[list]->data() + offset, flag.data(), size);
                executeInfo[nIdx].second++;
            }
        }

        // set the "all issued" flags to true; the topk functor uses them to
        // detect that every operator of the query has been put on the stream.
        for_each(executeInfo.begin() + nStart, executeInfo.begin() + nEnd, [](auto& item) { item.first = true; });
    }

    // waiting for topk functors to finish
    int topkWaitIdx = 0;
    try {
        for_each(topkFunctorRet.begin(), topkFunctorRet.end(), [&](auto& ret) { topkWaitIdx++; ret.get(); });
    } catch (AscendException &e) {
        // if an exception occurs, wait for the remaining topkFunctors to quit
        // before rethrowing, so no worker outlives this frame's tensors.
        errorQuit = true;
        for_each(topkFunctorRet.begin() + topkWaitIdx, topkFunctorRet.end(), [](auto& ret) { ret.wait(); });
        ASCEND_THROW_MSG(e.what());
    }
}

// L1 (coarse) search: compute distances between the queries and every coarse
// centroid, then select the nprobe nearest list ids into `result`.
void IndexIVFSQ::searchImplL1(AscendTensor<float16_t, DIMS_2> &queries, AscendTensor<uint32_t, DIMS_2> &result,
    aclrtStream stream)
{
    const int numQueries = queries.getSize(0);
    auto &mem = resources.getMemoryManager();
    AscendTensor<float16_t, DIMS_2> l1Distances(mem, { numQueries, numLists }, stream);
    AscendTensor<float16_t, DIMS_2> topkDistances(mem, { numQueries, nprobe }, stream);
    AscendTensor<uint32_t, DIMS_2> dummyIndices;

    // seed outputs with sentinel (invalid) values
    topkDistances.initValue(Limits<float16_t>::getMax());
    result.initValue(std::numeric_limits<uint32_t>::max());

    // run the L1 distance calculation and wait for it to complete
    runDistanceCompute(queries, coarseCentroidsShaped, normCoarseCentroids, l1Distances, stream);
    resources.syncDefaultStream();

    // take the nprobe nearest lists; no reorder is needed because the L2
    // search does not care which of the probed lists is nearest.
    ASCEND_THROW_IF_NOT(topkOp.exec(l1Distances, dummyIndices, topkDistances, result));

    // fp16 overflow can yield out-of-range list ids; reject them here.
    for (int qIdx = 0; qIdx < numQueries; qIdx++) {
        for (int probeIdx = 0; probeIdx < nprobe; probeIdx++) {
            int listId = result[qIdx][probeIdx].value();
            ASCEND_THROW_IF_NOT(listId >= 0 && listId < numLists);
        }
    }
}

// Full search: L1 coarse probe followed by L2 fine scan; writes the k best
// (distance, label) pairs per query into the caller-provided buffers.
void IndexIVFSQ::searchImpl(int n, const float16_t *x, int k, float16_t *distances, idx_t *labels)
{
    auto stream = resources.getDefaultStream();
    auto &mem = resources.getMemoryManager();
    AscendTensor<float16_t, DIMS_2> queries(const_cast<float16_t *>(x), { n, dims });
    AscendTensor<float16_t, DIMS_2> outDistances(distances, { n, k });
    AscendTensor<uint32_t, DIMS_2> outIndices(labels, { n, k });

    // init results to invalid data.
    outDistances.initValue(Limits<float16_t>::getMax());
    outIndices.initValue(std::numeric_limits<uint32_t>::max());

    // for performance improving, bind the main thread to cpu4-5,
    // and bind the threadpool to cpu0-cpu3. when n == 1, attach
    // main thread to one cpu(cpu5) is better than multicpus.
    if (n > 1) {
        AscendUtils::attachToCpus({ 4, 5 });
    } else {
        AscendUtils::attachToCpus({ 5 });
    }

    try {
        // L1 search, to find nprobe IVF lists
        AscendTensor<uint32_t, DIMS_2> l1result(mem, { n, nprobe }, stream);
        searchImplL1(queries, l1result, stream);

        // L2 search, scan codes in the nprobe IVF lists to find topk results
        searchImplL2(queries, l1result, outDistances, outIndices);
    } catch (...) {
        // fix: restore the cpu affinity even on failure, otherwise the main
        // thread stays pinned to the reduced cpu set after an exception
        // (searchImplL2 rethrows worker errors).
        AscendUtils::attachToCpus({ 0, 1, 2, 3, 4, 5 });
        throw;
    }

    // reattach cpus to cpu set { 0, 1, 2, 3, 4, 5 }
    AscendUtils::attachToCpus({ 0, 1, 2, 3, 4, 5 });
}

// Remove every vector whose id is selected by `sel`, using swap-with-last
// compaction per list; returns the number of vectors removed.
size_t IndexIVFSQ::removeIdsImpl(const IDSelector &sel)
{
    //
    // | id0 id1 id2 ... id98 id99 id100 |
    //            ^                  |
    //            |__________________|
    // if id2 is to be removed, copy the last id(id100) to
    // the mem place of id2, and decrease the list size to size-1.
    size_t removeCnt = 0;

    // dims is aligned with CUBE_ALIGN_SIZE, no padding data in horizontal direction
    int dimShaped = utils::divUp(this->dims, CUBE_ALIGN_SIZE);

// one list per task; removeCnt is accumulated across lists
#pragma omp parallel for reduction(+ : removeCnt)
    for (int id = 0; id < numLists; id++) {
        DeviceScope device;
        auto &indicesList = deviceListIndices[id];
        auto &codeList = deviceListData[id];
        auto &precompList = preComputeData[id];

        uint32_t *indicesPtr = indicesList->data();
        float *precompPtr = precompList->data();
        uint8_t *codePtr = static_cast<uint8_t *>(codeList->data());
        bool hasMoved = false;
        int j = indicesList->size() - 1;
        // i only advances when slot i is kept, so the element swapped in
        // from position j gets checked on the next iteration too.
        for (int i = 0; i <= j; ) {
            if (sel.is_member((*indicesList)[i])) {
                // overwrite slot i with the last live element (id, precompute, code)
                MEMCPY_S(indicesPtr + i, sizeof(uint32_t), indicesPtr + j, sizeof(uint32_t));
                MEMCPY_S(precompPtr + i, sizeof(float), precompPtr + j, sizeof(float));

                // codes live in Zz layout: move one cube-aligned chunk per block
                uint8_t *src = codePtr + getShapedDataOffset(j);
                uint8_t *dst = codePtr + getShapedDataOffset(i);
                for (int k = 0; k < dimShaped; k++) {
                    MEMCPY_S(dst, CUBE_ALIGN_SIZE * sizeof(uint8_t), src, CUBE_ALIGN_SIZE * sizeof(uint8_t));
                    src += (CUBE_ALIGN_SIZE * CUBE_ALIGN_SIZE);
                    dst += (CUBE_ALIGN_SIZE * CUBE_ALIGN_SIZE);
                }

                j--;
                removeCnt++;
                hasMoved = true;
            } else {
                i++;
            }
        }

        // if some codes were removed, the list needs to be resized and its memory reclaimed
        if (hasMoved) {
            size_t tmpLen = utils::roundUp((j + 1), CUBE_ALIGN_SIZE);
            indicesList->resize(j + 1);
            codeList->resize(tmpLen * this->dims);
            precompList->resize(tmpLen);
            indicesList->reclaim(false);
            codeList->reclaim(false);
            precompList->reclaim(false);
        }
    }

    this->ntotal -= removeCnt;
    return removeCnt;
}

void IndexIVFSQ::resetSqDistOperator()
{
    distSqOp.reset();
    AscendOpDesc desc("DistanceComputeSQ8");
    std::vector<int64_t> queryShape({ 1, dims });
    std::vector<int64_t> codeShape({ SEARCH_SHAPED_SIZE, dims / CUBE_ALIGN_SIZE, CUBE_ALIGN_SIZE, CUBE_ALIGN_SIZE });
    std::vector<int64_t> precompShape({ SEARCH_LIST_SIZE });
    std::vector<int64_t> vdiffShape({ dims });
    std::vector<int64_t> vminShape({ dims });
    std::vector<int64_t> sizeShape({ SIZE_ALIGN_SIZE });
    std::vector<int64_t> resultShape({ 1, SEARCH_LIST_SIZE });
    std::vector<int64_t> flagShape({ FLAG_ALIGN_SIZE });
    desc.addInputTensorDesc(ACL_FLOAT16, queryShape.size(), queryShape.data(), ACL_FORMAT_ND);
    desc.addInputTensorDesc(ACL_UINT8, codeShape.size(), codeShape.data(), ACL_FORMAT_ND);
    desc.addInputTensorDesc(ACL_FLOAT, precompShape.size(), precompShape.data(), ACL_FORMAT_ND);
    desc.addInputTensorDesc(ACL_FLOAT16, vdiffShape.size(), vdiffShape.data(), ACL_FORMAT_ND);
    desc.addInputTensorDesc(ACL_FLOAT16, vminShape.size(), vminShape.data(), ACL_FORMAT_ND);
    desc.addInputTensorDesc(ACL_UINT32, sizeShape.size(), sizeShape.data(), ACL_FORMAT_ND);
    desc.addOutputTensorDesc(ACL_FLOAT16, resultShape.size(), resultShape.data(), ACL_FORMAT_ND);
    desc.addOutputTensorDesc(ACL_UINT16, flagShape.size(), flagShape.data(), ACL_FORMAT_ND);

    distSqOp = std::make_unique<AscendOperator>(desc);
}

// Launch one DistanceComputeSQ8 operator on `stream` (asynchronous).
// The tensors' shapes must match those declared in resetSqDistOperator();
// `flag` is written by the aicores on completion (see searchImplL2's polling).
void IndexIVFSQ::runSqDistOperator(AscendTensor<float16_t, DIMS_2> &queries, AscendTensor<uint8_t, DIMS_4> &codes,
    AscendTensor<float, DIMS_1> &precomp, AscendTensor<float16_t, DIMS_1> &vdiff, AscendTensor<float16_t, DIMS_1> &vmin,
    AscendTensor<uint32_t, DIMS_1> &size, AscendTensor<float16_t, DIMS_2> &result, AscendTensor<uint16_t, DIMS_1> &flag,
    aclrtStream stream)
{
    ASCEND_ASSERT(distSqOp.get());
    // prepare for input data's buffer
    distSqOpInput[0] = aclCreateDataBuffer(queries.data(), queries.getSizeInBytes()); // input 0
    distSqOpInput[1] = aclCreateDataBuffer(codes.data(), codes.getSizeInBytes());     // input 1
    distSqOpInput[2] = aclCreateDataBuffer(precomp.data(), precomp.getSizeInBytes()); // input 2
    distSqOpInput[3] = aclCreateDataBuffer(vdiff.data(), vdiff.getSizeInBytes());     // input 3
    distSqOpInput[4] = aclCreateDataBuffer(vmin.data(), vmin.getSizeInBytes());       // input 4
    distSqOpInput[5] = aclCreateDataBuffer(size.data(), size.getSizeInBytes());       // input 5

    // prepare for output data's buffer
    distSqOpOutput[0] = aclCreateDataBuffer(result.data(), result.getSizeInBytes());  // output 0
    distSqOpOutput[1] = aclCreateDataBuffer(flag.data(), flag.getSizeInBytes());      // output 1

    // async executing operator
    distSqOp->exec(distSqOpInput, distSqOpOutput, stream);

    // NOTE(review): buffers are destroyed right after the async exec — this
    // presumably only releases the aclDataBuffer descriptors, not the tensor
    // memory the operator is still reading/writing; confirm against ACL docs.
    for (auto &item : distSqOpInput) {
        ACL_REQUIRE_OK(aclDestroyDataBuffer(item));
    }

    for (auto &item : distSqOpOutput) {
        ACL_REQUIRE_OK(aclDestroyDataBuffer(item));
    }
}

// Byte offset of vector `idx`'s first code chunk inside a list's Zz-shaped
// code storage (full 16-row blocks first, then the row within the block).
int IndexIVFSQ::getShapedDataOffset(int idx) const
{
    const int blockBase = this->dims * utils::roundDown(idx, CUBE_ALIGN_SIZE);
    const int rowInBlock = (idx % CUBE_ALIGN_SIZE) * CUBE_ALIGN_SIZE;
    return blockBase + rowInBlock;
}

// Access the precomputed-term vector of one IVF list; throws on a bad listId.
DeviceVector<float> &IndexIVFSQ::getListPrecompute(int listId) const
{
    ASCEND_THROW_IF_NOT((listId >= 0) && (listId < numLists));
    return *(preComputeData[listId]);
}

// Codes are stored in Zz (cube-shaped) layout in this index, so callers
// exporting list vectors must use getListVectorsReshaped().
bool IndexIVFSQ::listVectorsNeedReshaped() const
{
    return true;
}

// Export list listId's codes converted from the internal Zz layout back to
// a contiguous (numVecs x dims) byte layout in `reshaped`.
void IndexIVFSQ::getListVectorsReshaped(int listId, std::vector<unsigned char> &reshaped) const
{
    ASCEND_THROW_IF_NOT((listId < numLists) && (listId >= 0));

    size_t size = getListLength(listId);
    auto &data = getListVectors(listId);
    // dims is a multiple of CUBE_ALIGN_SIZE, so dimShaped blocks cover a row exactly
    int dimShaped = utils::divUp(this->dims, CUBE_ALIGN_SIZE);
    reshaped.resize(size * this->dims);

// reshape code from Zz format data to contiguous format.
#pragma omp parallel for if (size >= 100)
    for (size_t i = 0; i < size; i++) {
        int offset = getShapedDataOffset(i);
        auto srcPtr = data.data() + offset;
        auto dstPtr = reshaped.data() + i * this->dims * sizeof(unsigned char);
        // gather one CUBE_ALIGN_SIZE-wide chunk per cube block of the row
        for (int j = 0; j < dimShaped; j++) {
            MEMCPY_S(dstPtr + j * CUBE_ALIGN_SIZE,
                     CUBE_ALIGN_SIZE * sizeof(unsigned char),
                     srcPtr + j * CUBE_ALIGN_SIZE * CUBE_ALIGN_SIZE,
                     CUBE_ALIGN_SIZE * sizeof(unsigned char));
        }
    }
}
} // ascend
