/*
 * Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ASCEND_INDEXSQ_INCLUDED
#define ASCEND_INDEXSQ_INCLUDED

#include <ascenddaemon/utils/AscendTensor.h>
#include <ascenddaemon/utils/DeviceVector.h>
#include <ascenddaemon/impl/Index.h>
#include <ascenddaemon/impl/AscendOperator.h>
#include <ascenddaemon/utils/AscendThreadPool.h>
#include <memory>

namespace ascend {
// Scalar-quantized (SQ) flat index for Ascend devices.
//
// Vectors are stored as uint8 codes (one byte per dimension component,
// presumably produced by a linear scalar quantizer parameterized by a
// per-dimension minimum `vMin` and range `vDiff` — see updateTrainedValue).
// Alongside the codes, a per-vector precomputed float term is kept to
// accelerate distance evaluation (see `preCompute`).
//
// Storage is chunked: both `codes` and `preCompute` are lists of
// DeviceVector segments, each sized in units of `distComputeBatch`
// (NOTE(review): exact chunking policy is implemented in the .cpp — confirm).
// Distance computation is dispatched to a device-side SQ distance operator
// via runSqDistOperator on an aclrtStream.
class IndexSQ : public Index {
public:
    // Construct an empty SQ index for vectors of dimension `dim`.
    IndexSQ(int dim);

    ~IndexSQ();

    // Remove all stored vectors, returning the index to its empty state.
    void reset() override;

    // Append `numVecs` already-quantized vectors.
    //   data       - uint8 SQ codes, presumably numVecs * dims bytes — TODO confirm layout
    //   preCompute - one precomputed float distance term per vector
    void addVectors(size_t numVecs, const uint8_t *data, const float *preCompute);

    // Number of vectors currently stored (ntotal, inherited from Index).
    inline int getSize() const
    {
        return ntotal;
    }

    // Copy `num` stored code vectors starting at `offset` into `vectors`.
    void getVectors(uint32_t offset, uint32_t num, std::vector<uint8_t> &vectors);

    // Vector dimensionality (dims, inherited from Index).
    inline int getDim() const
    {
        return dims;
    }

    // Batch size used by the device distance-compute operator; also the
    // granularity of the chunked code/precompute storage.
    inline int getDistComputeBatch() const
    {
        return distComputeBatch;
    }

    // Read-only access to the chunked device-side SQ code storage.
    inline const std::vector<std::unique_ptr<DeviceVector<uint8_t>>> &getCodes() const
    {
        return codes;
    }

    // Read-only access to the chunked per-vector precomputed terms.
    inline const std::vector<std::unique_ptr<DeviceVector<float>>> &getPreCompute() const
    {
        return preCompute;
    }

    // Install trained quantizer parameters: per-dimension minimum values and
    // value ranges (diff), stored in fp16 for the device operator.
    void updateTrainedValue(AscendTensor<float16_t, DIMS_1> &trainedMin, AscendTensor<float16_t, DIMS_1> &trainedDiff);

private:
    // Index-framework hook: add `n` fp16 vectors with the given ids.
    void addImpl(int n, const float16_t *x, const idx_t *ids) override;

    // Copy raw codes for `numVecs` vectors into chunk `vecIndex` at
    // in-chunk position `dVecIndex`.
    void memcpy2Codes(int vecIndex, int dVecIndex, int numVecs, const uint8_t *data);

    // Same as memcpy2Codes, but for the per-vector precomputed float terms.
    void copy2PreCompute(int vecIndex, int dVecIndex, int numVecs, const float *preCompute);

    // Tensor-based k-NN search over the stored codes; fills per-query
    // distances and result indices.
    void searchImpl(AscendTensor<float16_t, DIMS_2>& queries, int k,
                    AscendTensor<float16_t, DIMS_2>& outDistance, AscendTensor<uint32_t, DIMS_2>& outIndices);

    // Index-framework hook: raw-pointer k-NN search entry point.
    void searchImpl(int n, const float16_t *x, int k, float16_t *distances, idx_t *labels) override;

    // Launch the device SQ distance operator on `stream` for one batch of
    // codes, producing fp16 distances in `result`; `flag` looks like a
    // device-side completion/status marker — NOTE(review): confirm semantics.
    void runSqDistOperator(AscendTensor<float16_t, DIMS_2> &queries,
                            AscendTensor<uint8_t, DIMS_4> &codes,
                            AscendTensor<float, DIMS_1> &precomp,
                            AscendTensor<float16_t, DIMS_1> &vdiff,
                            AscendTensor<float16_t, DIMS_1> &vmin,
                            AscendTensor<uint32_t, DIMS_1> &size,
                            AscendTensor<float16_t, DIMS_2> &result,
                            AscendTensor<uint16_t, DIMS_1> &flag,
                            aclrtStream stream);

    // (Re)create the SQ distance operator, e.g. after parameters change.
    void resetSqDistOperator();

    // Deletion support: remove all ids matched by `sel`; returns the number
    // of vectors removed.
    size_t removeIdsImpl(const IDSelector &sel) override;
    // Remove an explicit list of ids.
    size_t removeIdsBatch(const std::vector<idx_t> &indices);
    // Remove all ids in [min, max) — TODO confirm inclusivity in the .cpp.
    size_t removeIdsRange(idx_t min, idx_t max);

    // Compaction helpers: move the vector at `srcIdx` into slot `destIdx`
    // (used to fill holes left by removals).
    void moveCodesForward(idx_t srcIdx, idx_t destIdx);
    void movePreComputeForward(idx_t srcIdx, idx_t destIdx);
    // Move both the code bytes and the precomputed term of one vector.
    inline void moveVectorForward(idx_t srcIdx, idx_t destIdx)
    {
        moveCodesForward(srcIdx, destIdx);
        movePreComputeForward(srcIdx, destIdx);
    }
    // Shrink/release trailing storage chunks after `remove` of `oldTotal`
    // vectors were deleted.
    void releaseUnusageSpace(int oldTotal, int remove);

private:
    // Batch size of the device distance operator / storage chunk unit.
    int distComputeBatch;
    // Capacity (in vectors) of each DeviceVector chunk — TODO confirm units.
    int devVecCapacity;

    // Trained scalar-quantizer parameters (per-dimension min and range),
    // kept in fp16 for direct use by the device operator.
    AscendTensor<float16_t, DIMS_1> vMin;
    AscendTensor<float16_t, DIMS_1> vDiff;

    // Non-owning? pool used for parallel host-side work —
    // NOTE(review): ownership/lifetime is established in the .cpp; confirm.
    ThreadPool *threadPool;

    // Chunked device storage of uint8 SQ codes, one DeviceVector per chunk.
    std::vector<std::unique_ptr<DeviceVector<uint8_t>>> codes;
    // Chunked device storage of the per-vector precomputed distance terms.
    std::vector<std::unique_ptr<DeviceVector<float>>> preCompute;
};
} // namespace ascend

#endif // ASCEND_INDEXSQ_INCLUDED
