// Copyright (C) 2019-2023 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.

#include <sys/time.h>

#include <algorithm>
#include <cctype>
#include <cmath>
#include <cstring>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>
#include <utility>
#include <vector>

#include <faiss/ascend/AscendIndexFlat.h>
#include <faiss/ascend/AscendCloner.h>
#include <faiss/ascend/AscendClonerOptions.h>
#include <faiss/impl/io.h>
#include <faiss/index_io.h>
#include <faiss/MetricType.h>

#include "common/ascend/ascend_utils.h"
#include "index/ascend/ascendflat_config.h"
#include "knowhere/comp/thread_pool.h"
#include "knowhere/index/index_factory.h"

namespace knowhere {
namespace {
using knowhere::ascend::ascend_device_manager;
// NOTE(review): mtx is never referenced in this file; all device-level locking
// goes through ascend_device_manager::get_dev_mutex_ptr — confirm before removing.
std::mutex mtx;
// Over-allocation growth factor for MyMemoryIOWriter's internal buffer.
size_t magic_num = 2;

// In-memory faiss::IOWriter with geometric (magic_num x) over-allocation.
// Ownership note: data_ is intentionally NOT freed by a destructor — the
// caller (Serialize) takes ownership of the buffer after writing completes.
struct MyMemoryIOWriter : public faiss::IOWriter {
    uint8_t* data_ = nullptr;  // heap buffer, ownership released to the caller
    size_t total = 0;          // allocated capacity in bytes
    size_t rp = 0;             // write position == number of bytes written

    // Appends size * nitems bytes from ptr, growing the buffer as needed.
    // Returns nitems, mirroring fwrite semantics expected by faiss.
    size_t
    operator()(const void* ptr, size_t size, size_t nitems) override {
        const size_t nbytes = size * nitems;
        const size_t total_need = nbytes + rp;

        // Allocate (first call) or grow with headroom so repeated small
        // writes do not reallocate every time.
        if (data_ == nullptr || total_need > total) {
            total = total_need * magic_num;
            auto new_data = new uint8_t[total];
            if (data_ != nullptr) {
                memcpy(new_data, data_, rp);
                delete[] data_;
            }
            data_ = new_data;
        }

        memcpy(data_ + rp, ptr, nbytes);
        rp = total_need;
        return nitems;
    }

    // Typed write helper; byte-swaps each element on big-endian hosts so the
    // serialized format stays little-endian.
    template <typename T>
    size_t
    write(T* ptr, size_t size, size_t nitems = 1) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        for (size_t i = 0; i < nitems; ++i) {
            *(ptr + i) = getSwappedBytes(*(ptr + i));
        }

#endif
        return operator()((const void*)ptr, size, nitems);
    }
};

// In-memory faiss::IOReader over a non-owning byte buffer (data_ is borrowed
// from the BinarySet in Deserialize and must outlive the reader).
struct MyMemoryIOReader : public faiss::IOReader {
    uint8_t* data_ = nullptr;  // borrowed, not owned (was left uninitialized before)
    size_t rp = 0;             // read position in bytes
    size_t total = 0;          // total bytes available

    // Reads up to nitems elements of `size` bytes each; returns the number of
    // whole elements actually read (fread semantics expected by faiss).
    size_t
    operator()(void* ptr, size_t size, size_t nitems) override {
        // size == 0 would divide by zero below; treat it as nothing to read.
        if (rp >= total || size == 0) {
            return 0;
        }
        size_t nremain = (total - rp) / size;
        if (nremain < nitems) {
            nitems = nremain;
        }
        memcpy(ptr, (data_ + rp), size * nitems);
        rp += size * nitems;
        return nitems;
    }

    // Typed read helper; byte-swaps each element on big-endian hosts since the
    // on-disk format is little-endian.
    template <typename T>
    size_t
    read(T* ptr, size_t size, size_t nitems = 1) {
        auto res = operator()((void*)ptr, size, nitems);
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        for (size_t i = 0; i < nitems; ++i) {
            *(ptr + i) = getSwappedBytes(*(ptr + i));
        }
#endif

        return res;
    }
};

// Current wall-clock time in milliseconds; used only for coarse latency logs.
double GetMillisecs() {
    constexpr double kSecToMsec = 1e3;
    constexpr double kUsecToMsec = 1e-3;
    struct timeval now {};
    gettimeofday(&now, nullptr);
    return static_cast<double>(now.tv_sec) * kSecToMsec + static_cast<double>(now.tv_usec) * kUsecToMsec;
}

// Maps a (case-insensitive) knowhere metric name to the faiss MetricType.
// Returns Status::invalid_metric_type for unknown names; on success writes
// the result into metric_type.
Status
Str2FaissMetricType(std::string metric, faiss::MetricType &metric_type) {
    static const std::unordered_map<std::string, faiss::MetricType> metric_map = {
        {metric::L2, faiss::MetricType::METRIC_L2},
        {metric::IP, faiss::MetricType::METRIC_INNER_PRODUCT},
    };

    // Cast through unsigned char: feeding a plain (possibly negative) char to
    // toupper is undefined behavior.
    std::transform(metric.begin(), metric.end(), metric.begin(),
                   [](unsigned char c) { return static_cast<char>(std::toupper(c)); });
    auto it = metric_map.find(metric);
    if (it == metric_map.end()) {
        return Status::invalid_metric_type;
    }
    metric_type = it->second;
    return Status::success;
}
}

// IndexNode wrapper around faiss::ascend::AscendIndexFlat (brute-force search
// offloaded to an Ascend NPU). The "Ascned" spelling is historical and kept
// because the type name is part of the registration below.
// Every call that touches the device takes the per-device mutex from
// ascend_device_manager, serializing device access across index instances.
class AscnedFlatIndexNode : public IndexNode {
 public:
    AscnedFlatIndexNode(const int32_t version, const Object&) : index_(nullptr) {
        LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode construct:"
            << std::this_thread::get_id() << " this:" << int64_t(this);
        pool_ = ThreadPool::GetGlobalSearchThreadPool();
    }

    // Train then Add in one call; returns the first failing status.
    Status
    Build(const DataSetPtr dataset, std::shared_ptr<Config> cfg, bool use_knowhere_build_pool = true) override {
        LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Build:"
            << std::this_thread::get_id() << " this:" << int64_t(this);
        auto err = Train(dataset, cfg, use_knowhere_build_pool);
        if (err != Status::success) {
            return err;
        }
        return Add(dataset, std::move(cfg), use_knowhere_build_pool);
    }

    // Flat indexes need no statistical training: this only validates the
    // metric and allocates the device-side index on a randomly chosen device.
    // use_knowhere_build_pool is part of the IndexNode interface but unused here.
    Status
    Train(const DataSetPtr dataset, std::shared_ptr<Config> cfg, bool use_knowhere_build_pool = true) override {
        const AscendFlatConfig& f_cfg = static_cast<const AscendFlatConfig&>(*cfg);
        faiss::MetricType metric;
        auto err = Str2FaissMetricType(f_cfg.metric_type.value(), metric);
        if (err != Status::success) {
            LOG_KNOWHERE_ERROR_ << "please check metric type, " << f_cfg.metric_type.value();
            return err;
        }
        LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Train:"
            << std::this_thread::get_id() << " this:" << int64_t(this)
            << " dim: " << dataset->GetDim() << " metric type: " << f_cfg.metric_type.value();

        try {
            int32_t device_id = 0;
            RANDOM_CHOOSE_ASCEND_DEVICE_WITH_ASSIGN(device_id);
            std::vector<int> devices{ device_id }; // design choice: one ascend index lives on exactly one device
            int64_t resource_size = 1 * static_cast<int64_t>(1024 * 1024 * 1024);  // 1 GiB device resource pool
            faiss::ascend::AscendIndexFlatConfig conf(devices, resource_size);

            std::lock_guard<std::mutex> lk(*(ascend_device_manager::instance().get_dev_mutex_ptr(device_id)));
            LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Train on device:"
                << device_id << ", thread-" << std::this_thread::get_id() << " this:" << int64_t(this);
            index_ = std::make_unique<faiss::ascend::AscendIndexFlat>(
                static_cast<int>(dataset->GetDim()), metric, conf);
        } catch (const std::exception& e) {
            LOG_KNOWHERE_ERROR_ << "error create ascend index, " << e.what();
            return Status::ascend_inner_error;
        }

        return Status::success;
    }

    // Copies the dataset's vectors to the device index. Requires Train (or
    // Deserialize) to have succeeded first.
    Status
    Add(const DataSetPtr dataset, std::shared_ptr<Config> cfg, bool use_knowhere_build_pool = true) override {
        // Guard: the original code dereferenced a null index_ when Add was
        // called before Train.
        if (index_ == nullptr) {
            LOG_KNOWHERE_ERROR_ << "add on empty index";
            return Status::empty_index;
        }
        auto x = dataset->GetTensor();
        auto n = dataset->GetRows();
        LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Add:"
            << std::this_thread::get_id() << " this:" << int64_t(this) << " n:" << n;

        try {
            std::shared_ptr<float> codes(new float[n * static_cast<int64_t>(index_->d)],
                std::default_delete<float[]>());
            Quantization(static_cast<const float*>(x), n, static_cast<int64_t>(index_->d), codes.get());
            int32_t device_id = index_->getDeviceList()[0];
            std::lock_guard<std::mutex> lk(*(ascend_device_manager::instance().get_dev_mutex_ptr(device_id)));
            index_->add(n, codes.get());
        } catch (const std::exception& e) {
            LOG_KNOWHERE_ERROR_ << "error add data, " << e.what();
            return Status::ascend_inner_error;
        }

        LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Add finished:"
            << std::this_thread::get_id() << " this:" << int64_t(this) << " n:" << n;
        return Status::success;
    }

    // Top-k search. When a non-trivial bitset is supplied the filter is
    // converted to a per-query device mask. ids/distances ownership is handed
    // to GenResultDataSet on success; on failure they are freed here.
    expected<DataSetPtr>
    Search(const DataSetPtr dataset, std::unique_ptr<Config> cfg, const BitsetView& bitset) const override {
        if (index_ == nullptr) {
            LOG_KNOWHERE_ERROR_ << "search on empty index";
            // BUG FIX: the Err result was previously discarded (missing
            // `return`), so a search on an empty index fell through and
            // dereferenced the null index_ below.
            return expected<DataSetPtr>::Err(Status::empty_index, "index not loaded");
        }

        DataSetPtr results = std::make_shared<DataSet>();
        const AscendFlatConfig& f_cfg = static_cast<const AscendFlatConfig&>(*cfg);
        auto k = f_cfg.k.value();
        auto nq = dataset->GetRows();
        auto x = dataset->GetTensor();

        auto len = k * nq;
        int64_t* ids = nullptr;
        float* distances = nullptr;

        try {
            std::shared_ptr<float> codes(new float[nq * static_cast<int64_t>(index_->d)],
                std::default_delete<float[]>());
            Quantization(static_cast<const float*>(x), nq, static_cast<int64_t>(index_->d), codes.get());

            ids = new int64_t[len];
            distances = new float[len];

            int32_t device_id = index_->getDeviceList()[0];
            if (bitset.empty() || (bitset.count() == 0)) {
                // No filtering requested: plain top-k search.
                std::lock_guard<std::mutex> lk(*(ascend_device_manager::instance().get_dev_mutex_ptr(device_id)));
                LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Search:"
                    << std::this_thread::get_id() << " this:" << int64_t(this)
                    << " nq:" << nq << " k:" << k << " ntotal:" << index_->ntotal;
                double t1 = GetMillisecs();
                index_->search(nq, codes.get(), k, distances, ids);
                double t2 = GetMillisecs();
                LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Search finished:" << t2 - t1;
            } else {
                // Translate the knowhere bitset into one device mask per query.
                size_t byte_size = bitset.byte_size();
                std::shared_ptr<uint8_t> masks(new uint8_t[byte_size * static_cast<size_t>(nq)],
                    std::default_delete<uint8_t[]>());
                Bitset2Mask(bitset, nq, masks.get());
                std::lock_guard<std::mutex> lk(*(ascend_device_manager::instance().get_dev_mutex_ptr(device_id)));
                LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Search with mask:"
                    << std::this_thread::get_id() << " this:" << int64_t(this)
                    << " nq:" << nq << " k:" << k << " ntotal:" << index_->ntotal;
                double t1 = GetMillisecs();
                index_->search_with_masks(nq, codes.get(), k, distances, ids, masks.get());
                double t2 = GetMillisecs();
                LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Search with mask finished:" << t2 - t1;
            }
        } catch (const std::exception& e) {
            // Free the result buffers; they were never handed off.
            std::unique_ptr<int64_t[]> auto_delete_ids(ids);
            std::unique_ptr<float[]> auto_delete_dis(distances);
            LOG_KNOWHERE_ERROR_ << "error inner ascend, " << e.what();
            return expected<DataSetPtr>::Err(Status::ascend_inner_error, e.what());
        }

        return GenResultDataSet(nq, k, ids, distances);
    }

    expected<DataSetPtr>
    RangeSearch(const DataSetPtr dataset, std::unique_ptr<Config> cfg, const BitsetView& bitset) const override {
        LOG_KNOWHERE_ERROR_ << "ascend AscnedFlatIndexNode RangeSearch not implemented";
        return expected<DataSetPtr>::Err(Status::not_implemented, "RangeSearch not implemented");
    }

    expected<DataSetPtr>
    GetVectorByIds(const DataSetPtr dataset) const override {
        LOG_KNOWHERE_ERROR_ << "ascend AscnedFlatIndexNode GetVectorByIds not implemented";
        return expected<DataSetPtr>::Err(Status::not_implemented, "GetVectorByIds not implemented");
    }

    // Raw data retrieval is not supported by this backend.
    bool
    HasRawData(const std::string& metric_type) const override {
        LOG_KNOWHERE_ERROR_ << "ascend AscnedFlatIndexNode HasRawData not implemented";
        return false;
    }

    expected<DataSetPtr>
    GetIndexMeta(std::unique_ptr<Config> cfg) const override {
        LOG_KNOWHERE_ERROR_ << "ascend AscnedFlatIndexNode GetIndexMeta not implemented";
        return expected<DataSetPtr>::Err(Status::not_implemented, "GetIndexMeta not implemented");
    }

    // Converts the device index back to a CPU faiss index and serializes it
    // into binset. Buffer ownership moves from the writer into the BinarySet.
    Status
    Serialize(BinarySet& binset) const override {
        LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Serialize:"
            << std::this_thread::get_id() << " this:" << int64_t(this);

        if (index_ == nullptr) {
            LOG_KNOWHERE_ERROR_ << "index_ not initilized";
            // BUG FIX: previously built an expected<DataSetPtr>::Err and
            // discarded it (wrong type, no return), then dereferenced the
            // null index_ below.
            return Status::empty_index;
        }

        try {
            MyMemoryIOWriter writer;
            int32_t device_id = index_->getDeviceList()[0];
            std::lock_guard<std::mutex> lk(*(ascend_device_manager::instance().get_dev_mutex_ptr(device_id)));
            std::unique_ptr<faiss::Index> cpu_index(faiss::ascend::index_ascend_to_cpu(index_.get()));
            faiss::write_index(cpu_index.get(), &writer);
            std::shared_ptr<uint8_t[]> data(writer.data_);  // take ownership of the writer's buffer
            binset.Append(Type(), data, writer.rp);
        } catch (const std::exception& e) {
            LOG_KNOWHERE_ERROR_ << "error inner ascend, " << e.what();
            return Status::ascend_inner_error;
        }
        LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Serialize finished:"
            << std::this_thread::get_id() << " this:" << int64_t(this) << " ntotal:" << index_->ntotal;
        return Status::success;
    }

    // Reads a CPU faiss index from binset and clones it onto the Ascend
    // device with the lowest current load.
    Status
    Deserialize(const BinarySet& binset, std::shared_ptr<Config> config) override {
        std::string name = Type();
        auto binary = binset.GetByName(name);
        // Guard: GetByName returns null when the blob is absent; the original
        // code dereferenced it unconditionally.
        if (binary == nullptr) {
            LOG_KNOWHERE_ERROR_ << "invalid binary set, " << name << " not found";
            return Status::invalid_binary_set;
        }
        MyMemoryIOReader reader;
        reader.total = binary->size;
        reader.data_ = binary->data.get();

        try {
            std::unique_ptr<faiss::Index> cpu_index(faiss::read_index(&reader));
            int32_t device_id = 0;
            MIN_LOAD_CHOOSE_ASCEND_DEVICE_WITH_ASSIGN(device_id, binary->size);
            std::vector<int> devices{ device_id };
            faiss::ascend::AscendClonerOptions option; // TODO: find a way to pass resourceSize from config
            option.resourceSize = 2 * static_cast<int64_t>(1024 * 1024 * 1024);  // 2 GiB device resource pool
            std::lock_guard<std::mutex> lk(*(ascend_device_manager::instance().get_dev_mutex_ptr(device_id)));
            LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Deserialize on device:"
                << device_id << ", thread-" << std::this_thread::get_id() << " this:" << int64_t(this);
            auto ascend_index = dynamic_cast<faiss::ascend::AscendIndexFlat *>(
                faiss::ascend::index_cpu_to_ascend(devices, cpu_index.get(), &option));
            if (ascend_index == nullptr) {
                LOG_KNOWHERE_ERROR_ << "error inner ascend, index_cpu_to_ascend failed";
                return Status::ascend_inner_error;
            }
            index_.reset(ascend_index);
            this->load_ = binary->size;  // remembered so the dtor can release the device quota
        } catch (const std::exception& e) {
            LOG_KNOWHERE_ERROR_ << "error inner ascend, " << e.what();
            return Status::ascend_inner_error;
        }
        LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Deserialize finished:"
            << std::this_thread::get_id() << " this:" << int64_t(this) << " ntotal:" << index_->ntotal;
        return Status::success;
    }

    Status
    DeserializeFromFile(const std::string& filename, std::shared_ptr<Config> config) override {
        LOG_KNOWHERE_ERROR_ << "ascend AscnedFlatIndexNode DeserializeFromFile not implemented";
        return Status::not_implemented;
    }

    std::unique_ptr<BaseConfig>
    CreateConfig() const override {
        return std::make_unique<AscendFlatConfig>();
    }

    // Precondition for Dim/Size/Count: index_ is non-null (Train or
    // Deserialize succeeded) — callers are expected to check Build status.
    int64_t
    Dim() const override {
        return static_cast<int64_t>(index_->d);
    }

    int64_t
    Size() const override {
        return index_->ntotal * static_cast<int64_t>(index_->d) * static_cast<int64_t>(sizeof(float));
    }

    int64_t
    Count() const override {
        return index_->ntotal;
    }

    std::string
    Type() const override {
        return knowhere::IndexEnum::INDEX_ASCEND_FLAT;
    }

    ~AscnedFlatIndexNode() override {
        // BUG FIX: the original dtor dereferenced index_ unconditionally,
        // which is UB when the node was constructed but never trained/loaded.
        if (index_ == nullptr) {
            return;
        }
        LOG_KNOWHERE_INFO_ << "ascend AscnedFlatIndexNode Deconstruct:"
            << std::this_thread::get_id() << " this:" << int64_t(this) << " ntotal:" << index_->ntotal;
        try {
            int32_t device_id = index_->getDeviceList()[0];
            std::lock_guard<std::mutex> lk(*(ascend_device_manager::instance().get_dev_mutex_ptr(device_id)));
            RELEASE_ASCEND_DEVICE(device_id, this->load_);
        } catch (const std::exception& e) {
            LOG_KNOWHERE_WARNING_ << "error inner ascend, " << e.what();
        }
    }

 private:
    // Pool-parallel copy of nq * dim floats from in to out. Currently an
    // identity transform — presumably a placeholder for real quantization;
    // confirm before relying on the name.
    void Quantization(const float *in, int64_t nq, int64_t dim, float *out) const {
        std::vector<folly::Future<folly::Unit>> futs;
        futs.reserve(nq);
        for (int64_t i = 0; i < nq; i++) {
            futs.push_back(pool_->push([&, index = i] {
                for (int64_t j = 0; j < dim; j++) {
                    int64_t offset = index * dim + j;
                    out[offset] = in[offset];
                }
            }));
        }
        for (auto& fut : futs) {
            fut.wait();
        }
    }

    // Replicates the bit-inverted bitset bytes once per query into mask
    // (nq * byte_size bytes). The inversion suggests knowhere's bitset marks
    // excluded ids while the ascend mask marks allowed ones — TODO confirm
    // against search_with_masks semantics.
    void Bitset2Mask(const BitsetView& bitset, int64_t nq, uint8_t *mask) const {
        size_t byte_size = bitset.byte_size();
        const uint8_t *bits = bitset.data();

        std::vector<folly::Future<folly::Unit>> futs;
        futs.reserve(nq);
        for (size_t i = 0; i < size_t(nq); i++) {
            futs.push_back(pool_->push([&, index = i] {
                for (size_t j = 0; j < byte_size; j++) {
                    mask[index * byte_size + j] = ~(bits[j]);
                }
            }));
        }
        for (auto& fut : futs) {
            fut.wait();
        }
    }

 private:
    std::unique_ptr<faiss::ascend::AscendIndexFlat> index_;  // device-side index (null until Train/Deserialize)
    std::shared_ptr<ThreadPool> pool_;                       // global search pool for host-side data prep
    size_t load_ = 0;                                        // bytes accounted against the device on Deserialize
};

// Registers the Ascend flat index in the global factory under ASCEND_FLAT for
// fp32 vectors; the lambda is the per-instance creation hook.
KNOWHERE_REGISTER_GLOBAL(
    ASCEND_FLAT,
    [](const int32_t& version, const Object& object) { 
        return Index<AscnedFlatIndexNode>::Create(version, object);
    },
    fp32,
    typeCheck<fp32>(knowhere::feature::FLOAT32),
    knowhere::feature::FLOAT32
);

}  // namespace knowhere
