// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/array/builder_adaptive.h>

#include <algorithm>
#include <cstdint>

#include <nebula/array/data.h>
#include <nebula/core/buffer.h>
#include <nebula/core/buffer_builder.h>

#include <turbo/utility/status.h>
#include <nebula/types/type.h>
#include <nebula/numeric/int_util.h>
#include <turbo/log/logging.h>

namespace nebula {

    using internal::AdaptiveIntBuilderBase;

    // Constructs the shared base of the adaptive builders.
    //
    // start_int_size: initial element width in bytes; it is kept in
    // start_int_size_ so reset() can restore it, and also seeds the current
    // width int_size_.
    AdaptiveIntBuilderBase::AdaptiveIntBuilderBase(uint8_t start_int_size, MemoryPool *pool,
                                                   int64_t alignment)
            : ArrayBuilder(pool, alignment),
              start_int_size_(start_int_size),
              int_size_(start_int_size) {}

    // Returns the builder to its freshly-constructed state: base builder
    // bookkeeping is cleared, the data buffer is dropped, pending scalar
    // appends are discarded, and the element width shrinks back to the
    // starting width.
    void AdaptiveIntBuilderBase::reset() {
        ArrayBuilder::reset();
        data_.reset();
        raw_data_ = nullptr;
        pending_has_nulls_ = false;
        pending_pos_ = 0;
        int_size_ = start_int_size_;
    }

    // Grows (or first-allocates) the data buffer to hold `capacity` elements
    // at the current element width, then lets the base class resize the null
    // bitmap and update capacity_.
    turbo::Status AdaptiveIntBuilderBase::resize(int64_t capacity) {
        TURBO_RETURN_NOT_OK(check_capacity(capacity));
        if (capacity < kMinBuilderCapacity) {
            capacity = kMinBuilderCapacity;
        }

        // The data buffer is sized in bytes for the *current* width.
        const int64_t byte_size = capacity * int_size_;
        if (capacity_ == 0) {
            // No buffer yet: allocate a fresh resizable one.
            TURBO_MOVE_OR_RAISE(data_, allocate_resizable_buffer(byte_size, pool_));
        } else {
            TURBO_RETURN_NOT_OK(data_->resize(byte_size));
        }
        // The buffer may have moved in memory; refresh the raw pointer.
        raw_data_ = reinterpret_cast<uint8_t *>(data_->mutable_data());

        return ArrayBuilder::resize(capacity);
    }

    // No-op overload, selected by SFINAE when the requested width does not
    // exceed the current one: the stored elements already fit, so there is
    // nothing to convert.
    template<typename new_type, typename old_type>
    typename std::enable_if<sizeof(old_type) >= sizeof(new_type), turbo::Status>::type
    AdaptiveIntBuilderBase::ExpandIntSizeInternal() {
        return turbo::OkStatus();
    }

    // Widening overload, selected when new_type is strictly wider than
    // old_type: bumps int_size_ and converts the length_ stored elements
    // in place.
    template<typename new_type, typename old_type>
    typename std::enable_if<(sizeof(old_type) < sizeof(new_type)), turbo::Status>::type
    AdaptiveIntBuilderBase::ExpandIntSizeInternal() {
        int_size_ = sizeof(new_type);
        // resize() sizes the buffer in elements of the *new* int_size_, so pass
        // the element count currently stored (old byte size / old element size).
        TURBO_RETURN_NOT_OK(resize(data_->size() / sizeof(old_type)));

        const old_type *src = reinterpret_cast<old_type *>(raw_data_);
        new_type *dst = reinterpret_cast<new_type *>(raw_data_);
        // By doing the backward copy, we ensure that no element is overridden during
        // the copy process while the copy stays in-place.
        std::copy_backward(src, src + length_, dst + length_);

        return turbo::OkStatus();
    }

    std::shared_ptr<DataType> AdaptiveUIntBuilder::type() const {
        auto int_size = int_size_;
        if (pending_pos_ != 0) {
            const uint8_t *valid_bytes = pending_has_nulls_ ? pending_valid_ : nullptr;
            int_size =
                    internal::DetectUIntWidth(pending_data_, valid_bytes, pending_pos_, int_size_);
        }
        switch (int_size) {
            case 1:
                return uint8();
            case 2:
                return uint16();
            case 4:
                return uint32();
            case 8:
                return uint64();
            default:
                        DKCHECK(false);
        }
        return nullptr;
    }

    std::shared_ptr<DataType> AdaptiveIntBuilder::type() const {
        auto int_size = int_size_;
        if (pending_pos_ != 0) {
            const uint8_t *valid_bytes = pending_has_nulls_ ? pending_valid_ : nullptr;
            int_size = internal::DetectIntWidth(reinterpret_cast<const int64_t *>(pending_data_),
                                                valid_bytes, pending_pos_, int_size_);
        }
        switch (int_size) {
            case 1:
                return int8();
            case 2:
                return int16();
            case 4:
                return int32();
            case 8:
                return int64();
            default:
                        DKCHECK(false);
        }
        return nullptr;
    }

    // Builds signed integers; all state lives in AdaptiveIntBuilderBase.
    AdaptiveIntBuilder::AdaptiveIntBuilder(uint8_t start_int_size, MemoryPool *pool,
                                           int64_t alignment)
            : AdaptiveIntBuilderBase(start_int_size, pool, alignment) {}

    // Materializes the accumulated values into an ArrayData and resets the
    // builder for reuse.
    turbo::Status AdaptiveIntBuilder::finish_internal(std::shared_ptr<ArrayData> *out) {
        // Flush any scalar appends still sitting in the pending buffer.
        TURBO_RETURN_NOT_OK(CommitPendingData());

        std::shared_ptr<Buffer> null_bitmap;
        TURBO_RETURN_NOT_OK(null_bitmap_builder_.finish(&null_bitmap));
        // Shrink the data buffer to exactly length_ elements of the final width.
        TURBO_RETURN_NOT_OK(trim_buffer(length_ * int_size_, data_.get()));

        // If nothing was ever appended, data_ is still null; hand out a valid
        // zero-length buffer instead.
        std::shared_ptr<Buffer> values_buffer = data_;
        if (values_buffer == nullptr) {
            TURBO_MOVE_OR_RAISE(values_buffer, allocate_buffer(0, pool_));
        }

        *out = ArrayData::create(type(), length_, {null_bitmap, std::move(values_buffer)},
                                 null_count_);

        // Leave the builder empty and ready for the next array.
        data_ = nullptr;
        capacity_ = 0;
        length_ = 0;
        null_count_ = 0;
        return turbo::OkStatus();
    }

    // Moves values buffered by the scalar append path into the main data
    // buffer, widening as necessary, then empties the pending buffer.
    turbo::Status AdaptiveIntBuilder::CommitPendingData() {
        if (pending_pos_ != 0) {
            TURBO_RETURN_NOT_OK(Reserve(pending_pos_));
            const uint8_t *validity = pending_has_nulls_ ? pending_valid_ : nullptr;
            TURBO_RETURN_NOT_OK(AppendValuesInternal(
                    reinterpret_cast<const int64_t *>(pending_data_), pending_pos_, validity));
            pending_has_nulls_ = false;
            pending_pos_ = 0;
        }
        return turbo::OkStatus();
    }

    // Bulk appends are processed in chunks of this many elements so that the
    // two passes over the data (width detection, then copy) stay cache-friendly.
    static constexpr int64_t kAdaptiveIntChunkSize = 8192;

    // Copies `length` int64 values into the data buffer, widening int_size_
    // on the fly whenever a chunk contains values that do not fit the current
    // width. `valid_bytes` (may be null) flags which entries are valid; null
    // slots are still written but marked in the bitmap.
    // Precondition: Reserve(length) has already been called.
    turbo::Status AdaptiveIntBuilder::AppendValuesInternal(const int64_t *values, int64_t length,
                                                           const uint8_t *valid_bytes) {
        if (pending_pos_ > 0) {
            // unsafe_append_to_bitmap expects length_ to be the pre-update value, satisfy it
                    DKCHECK_EQ(length, pending_pos_) << "AppendValuesInternal called while data pending";
            length_ -= pending_pos_;
        }

        while (length > 0) {
            // In case `length` is very large, we don't want to trash the cache by
            // scanning it twice (first to detect int width, second to copy the data).
            // Instead, process data in L2-cacheable chunks.
            const int64_t chunk_size = std::min(length, kAdaptiveIntChunkSize);

            uint8_t new_int_size;
            new_int_size = internal::DetectIntWidth(values, valid_bytes, chunk_size, int_size_);

                    DKCHECK_GE(new_int_size, int_size_);
            if (new_int_size > int_size_) {
                // This updates int_size_
                TURBO_RETURN_NOT_OK(ExpandIntSize(new_int_size));
            }

            // Narrow each value to the (possibly just expanded) element width
            // and write it at offset length_ in the typed view of raw_data_.
            switch (int_size_) {
                case 1:
                    internal::DowncastInts(values, reinterpret_cast<int8_t *>(raw_data_) + length_,
                                           chunk_size);
                    break;
                case 2:
                    internal::DowncastInts(values, reinterpret_cast<int16_t *>(raw_data_) + length_,
                                           chunk_size);
                    break;
                case 4:
                    internal::DowncastInts(values, reinterpret_cast<int32_t *>(raw_data_) + length_,
                                           chunk_size);
                    break;
                case 8:
                    internal::DowncastInts(values, reinterpret_cast<int64_t *>(raw_data_) + length_,
                                           chunk_size);
                    break;
                default:
                            DKCHECK(false);
            }

            // unsafe_append_to_bitmap increments length_ by chunk_size
            ArrayBuilder::unsafe_append_to_bitmap(valid_bytes, chunk_size);
            values += chunk_size;
            if (valid_bytes != nullptr) {
                valid_bytes += chunk_size;
            }
            length -= chunk_size;
        }

        return turbo::OkStatus();
    }

    // Moves values buffered by the scalar append path into the main data
    // buffer, widening as necessary, then empties the pending buffer.
    turbo::Status AdaptiveUIntBuilder::CommitPendingData() {
        if (pending_pos_ != 0) {
            TURBO_RETURN_NOT_OK(Reserve(pending_pos_));
            const uint8_t *validity = pending_has_nulls_ ? pending_valid_ : nullptr;
            TURBO_RETURN_NOT_OK(AppendValuesInternal(pending_data_, pending_pos_, validity));
            pending_has_nulls_ = false;
            pending_pos_ = 0;
        }
        return turbo::OkStatus();
    }

    // Bulk-append entry point: flushes pending scalar appends first so the
    // internal path starts from a consistent length_, reserves room, then
    // delegates to AppendValuesInternal.
    turbo::Status AdaptiveIntBuilder::append_values(const int64_t *values, int64_t length,
                                                   const uint8_t *valid_bytes) {
        TURBO_RETURN_NOT_OK(CommitPendingData());
        TURBO_RETURN_NOT_OK(Reserve(length));
        return AppendValuesInternal(values, length, valid_bytes);
    }

    // Dispatches on the current element width so ExpandIntSizeInternal knows
    // the source type of the in-place conversion to new_type.
    template<typename new_type>
    turbo::Status AdaptiveIntBuilder::ExpandIntSizeN() {
        if (int_size_ == 1) return ExpandIntSizeInternal<new_type, int8_t>();
        if (int_size_ == 2) return ExpandIntSizeInternal<new_type, int16_t>();
        if (int_size_ == 4) return ExpandIntSizeInternal<new_type, int32_t>();
        if (int_size_ == 8) return ExpandIntSizeInternal<new_type, int64_t>();
        DKCHECK(false);
        return turbo::OkStatus();
    }

    // Widens the stored elements to `new_int_size` bytes (1, 2, 4 or 8) by
    // selecting the matching destination type for ExpandIntSizeN.
    turbo::Status AdaptiveIntBuilder::ExpandIntSize(uint8_t new_int_size) {
        if (new_int_size == 1) return ExpandIntSizeN<int8_t>();
        if (new_int_size == 2) return ExpandIntSizeN<int16_t>();
        if (new_int_size == 4) return ExpandIntSizeN<int32_t>();
        if (new_int_size == 8) return ExpandIntSizeN<int64_t>();
        DKCHECK(false);
        return turbo::OkStatus();
    }

    // Builds unsigned integers; all state lives in AdaptiveIntBuilderBase.
    // Note: unlike AdaptiveIntBuilder, no alignment parameter is exposed here,
    // so the base class uses its default alignment.
    AdaptiveUIntBuilder::AdaptiveUIntBuilder(uint8_t start_int_size, MemoryPool *pool)
            : AdaptiveIntBuilderBase(start_int_size, pool) {}

    // Materializes the accumulated values into an ArrayData and resets the
    // builder for reuse.
    turbo::Status AdaptiveUIntBuilder::finish_internal(std::shared_ptr<ArrayData> *out) {
        // Flush any scalar appends still sitting in the pending buffer.
        TURBO_RETURN_NOT_OK(CommitPendingData());

        std::shared_ptr<Buffer> null_bitmap;
        TURBO_RETURN_NOT_OK(null_bitmap_builder_.finish(&null_bitmap));
        // Shrink the data buffer to exactly length_ elements of the final width.
        TURBO_RETURN_NOT_OK(trim_buffer(length_ * int_size_, data_.get()));

        // Mirror AdaptiveIntBuilder::finish_internal: when nothing was ever
        // appended, data_ is still null, so substitute a valid zero-length
        // buffer rather than emitting an ArrayData with a null values buffer.
        std::shared_ptr<Buffer> values_buffer = data_;
        if (!values_buffer) {
            TURBO_MOVE_OR_RAISE(values_buffer, allocate_buffer(0, pool_));
        }

        *out = ArrayData::create(type(), length_, {null_bitmap, std::move(values_buffer)},
                                 null_count_);

        // Leave the builder empty and ready for the next array.
        data_ = nullptr;
        capacity_ = length_ = null_count_ = 0;
        return turbo::OkStatus();
    }

    // Copies `length` uint64 values into the data buffer, widening int_size_
    // on the fly whenever a chunk contains values that do not fit the current
    // width. `valid_bytes` (may be null) flags which entries are valid; null
    // slots are still written but marked in the bitmap.
    // Precondition: Reserve(length) has already been called.
    turbo::Status AdaptiveUIntBuilder::AppendValuesInternal(const uint64_t *values, int64_t length,
                                                            const uint8_t *valid_bytes) {
        if (pending_pos_ > 0) {
            // unsafe_append_to_bitmap expects length_ to be the pre-update value, satisfy it
                    DKCHECK_EQ(length, pending_pos_) << "AppendValuesInternal called while data pending";
            length_ -= pending_pos_;
        }

        while (length > 0) {
            // See AdaptiveIntBuilder::AppendValuesInternal
            const int64_t chunk_size = std::min(length, kAdaptiveIntChunkSize);

            uint8_t new_int_size;
            new_int_size = internal::DetectUIntWidth(values, valid_bytes, chunk_size, int_size_);

                    DKCHECK_GE(new_int_size, int_size_);
            if (new_int_size > int_size_) {
                // This updates int_size_
                TURBO_RETURN_NOT_OK(ExpandIntSize(new_int_size));
            }

            // Narrow each value to the (possibly just expanded) element width
            // and write it at offset length_ in the typed view of raw_data_.
            switch (int_size_) {
                case 1:
                    internal::DowncastUInts(values, reinterpret_cast<uint8_t *>(raw_data_) + length_,
                                            chunk_size);
                    break;
                case 2:
                    internal::DowncastUInts(values, reinterpret_cast<uint16_t *>(raw_data_) + length_,
                                            chunk_size);
                    break;
                case 4:
                    internal::DowncastUInts(values, reinterpret_cast<uint32_t *>(raw_data_) + length_,
                                            chunk_size);
                    break;
                case 8:
                    internal::DowncastUInts(values, reinterpret_cast<uint64_t *>(raw_data_) + length_,
                                            chunk_size);
                    break;
                default:
                            DKCHECK(false);
            }

            // unsafe_append_to_bitmap increments length_ by chunk_size
            ArrayBuilder::unsafe_append_to_bitmap(valid_bytes, chunk_size);
            values += chunk_size;
            if (valid_bytes != nullptr) {
                valid_bytes += chunk_size;
            }
            length -= chunk_size;
        }

        return turbo::OkStatus();
    }

    // Bulk-append entry point: flushes pending scalar appends first (matching
    // AdaptiveIntBuilder::append_values), reserves room, then delegates to
    // AppendValuesInternal.
    turbo::Status AdaptiveUIntBuilder::append_values(const uint64_t *values, int64_t length,
                                                    const uint8_t *valid_bytes) {
        // Without this commit, calling append_values while scalar appends are
        // pending would trip AppendValuesInternal's pending_pos_ > 0 branch:
        // length_ would be wrongly decremented and the buffered values dropped.
        TURBO_RETURN_NOT_OK(CommitPendingData());
        TURBO_RETURN_NOT_OK(Reserve(length));

        return AppendValuesInternal(values, length, valid_bytes);
    }

    // Dispatches on the current element width so ExpandIntSizeInternal knows
    // the source type of the in-place conversion to new_type.
    template<typename new_type>
    turbo::Status AdaptiveUIntBuilder::ExpandIntSizeN() {
        if (int_size_ == 1) return ExpandIntSizeInternal<new_type, uint8_t>();
        if (int_size_ == 2) return ExpandIntSizeInternal<new_type, uint16_t>();
        if (int_size_ == 4) return ExpandIntSizeInternal<new_type, uint32_t>();
        if (int_size_ == 8) return ExpandIntSizeInternal<new_type, uint64_t>();
        DKCHECK(false);
        return turbo::OkStatus();
    }

    // Widens the stored elements to `new_int_size` bytes (1, 2, 4 or 8) by
    // selecting the matching destination type for ExpandIntSizeN.
    turbo::Status AdaptiveUIntBuilder::ExpandIntSize(uint8_t new_int_size) {
        if (new_int_size == 1) return ExpandIntSizeN<uint8_t>();
        if (new_int_size == 2) return ExpandIntSizeN<uint16_t>();
        if (new_int_size == 4) return ExpandIntSizeN<uint32_t>();
        if (new_int_size == 8) return ExpandIntSizeN<uint64_t>();
        DKCHECK(false);
        return turbo::OkStatus();
    }

}  // namespace nebula
