// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
#include <melon/hash/hash.h>

#include <pollux/common/base/bit_util.h>
#include <pollux/common/base/exceptions.h>
#include <pollux/common/base/simd_util.h>
#include <pollux/vector/builder_type_utils.h>
#include <pollux/vector/constant_vector.h>
#include <pollux/vector/decoded_vector.h>
#include <pollux/vector/type_aliases.h>

namespace kumo::pollux {
    // Here are some common Intel intrinsic operations. Please refer to
    // https://software.intel.com/sites/landingpage/IntrinsicsGuide for examples.

    // _mm256_set1_epi<64x|32|16|8>(x) => set a 256bit vector with all values of x
    //    at the requested bit width
    // _mm256_cmpeq_epi<64|32|16|8>(a, b) => compare a vector of 8, 16, 32 or 64bit
    //    values in a vector with another vector. Result is a vector of all 0xFF..
    //    if the slot is equal between the two vectors or 0x00... if the slot is not
    //    equal between the two vectors
    // _mm256_cmpgt_epi<64|32|16|8>(a, b) => compare a vector of 8, 16, 32 or 64bit
    //    values in a vector with another vector. Result is a vector of all 0xFF..
    //    if the slot in `a` is greater than the slot in `b` or 0x00... otherwise
    // _mm256_loadu_si256(addr) => load 256 bits at addr into a single 256b
    // _mm256_movemask_ps(mask) -> Set each bit of mask dst based on the
    //    most significant bit of the corresponding packed single-precision (32-bit)
    //    floating-point element in a.
    // _mm256_testc_si256 => Compute bitwise AND of 2 256 bit vectors (see comment
    // blocks below for examples)

    // uses the simd utilities to smooth out access to variable width intrinsics

    // cost factors for individual operations on different filter paths - these are
    // experimentally derived from micro-bench perf testing.
    // 'inline constexpr' (C++17) guarantees a single compile-time constant
    // across translation units; plain namespace-scope 'const' would give each
    // TU including this template implementation its own copy.
    inline constexpr double SIMD_CMP_COST = 0.00000051;
    inline constexpr double SET_CMP_COST = 0.000023;

    // Read-only access to the raw values buffer. May be nullptr when no values
    // buffer was ever allocated (e.g. the vector contains only nulls).
    template<typename T>
    const T *FlatVector<T>::rawValues() const {
        return rawValues_;
    }

    // Fast unchecked accessor: reads rawValues_[idx] directly. Bounds are
    // verified only by POLLUX_DCHECK, i.e. in debug builds.
    template<typename T>
    T FlatVector<T>::valueAtFast(vector_size_t idx) const {
        POLLUX_DCHECK_LT(idx, BaseVector::length_, "Index out of range");
        return rawValues_[idx];
    }

    // Exposes the values buffer as a Range covering rows [0, length_).
    template<typename T>
    Range<T> FlatVector<T>::asRange() const {
        return Range<T>(rawValues(), 0, BaseVector::length_);
    }

    // Loads one SIMD batch of values starting at 'byteOffset' into the raw
    // values buffer. For bool the buffer is bit-packed, so the bytes are
    // loaded as-is and wrapped into a bool batch.
    template<typename T>
    ksimd::batch<T> FlatVector<T>::loadSIMDValueBufferAt(size_t byteOffset) const {
        auto *base = reinterpret_cast<uint8_t *>(rawValues_) + byteOffset;
        if constexpr (!std::is_same_v<T, bool>) {
            return ksimd::load_unaligned(reinterpret_cast<T *>(base));
        } else {
            return ksimd::batch<T>(ksimd::load_unaligned(base));
        }
    }

    // Computes a 64-bit hash for every row and returns them as a BIGINT
    // FlatVector. Null rows hash to BaseVector::kNullHash.
    template<typename T>
    std::unique_ptr<SimpleVector<uint64_t> > FlatVector<T>::hash_all() const {
        // One uint64_t slot per row.
        BufferPtr buffer =
                AlignedBuffer::allocate<uint64_t>(BaseVector::length_, BaseVector::pool_);
        auto *rawHashes = buffer->asMutable<uint64_t>();

        // Hash the values when a values buffer exists (i.e. the vector is not
        // all-null).
        if (rawValues_ != nullptr) {
            melon::hasher<T> hasher;
            for (size_t row = 0; row < BaseVector::length_; ++row) {
                rawHashes[row] = hasher(valueAtFast(row));
            }
        }

        // Null positions get the canonical null hash, replacing whatever the
        // value pass wrote there.
        if (BaseVector::rawNulls_ != nullptr) {
            for (size_t row = 0; row < BaseVector::length_; ++row) {
                if (bits::isBitNull(BaseVector::rawNulls_, row)) {
                    rawHashes[row] = BaseVector::kNullHash;
                }
            }
        }
        return std::make_unique<FlatVector<uint64_t> >(
            BaseVector::pool_,
            BIGINT(),
            BufferPtr(nullptr),
            BaseVector::length_,
            std::move(buffer),
            std::vector<BufferPtr>() /*stringBuffers*/,
            SimpleVectorStats<uint64_t>{}, /* stats */
            std::nullopt /*distinctValueCount*/,
            0 /*nullCount*/,
            false /*sorted*/,
            sizeof(uint64_t) * BaseVector::length_ /*represented_bytes*/);
    }

    // Decides between the SIMD equality path and the fallback set-lookup path
    // using the experimentally derived per-operation cost constants above.
    template<typename T>
    bool FlatVector<T>::useSimdEquality(size_t numCmpVals) const {
        if constexpr (std::is_integral_v<T>) {
            // Estimated cost of one set lookup per row vs. one SIMD compare per
            // batch per comparison value; choose SIMD when it is no worse.
            const double setLookupCost = SET_CMP_COST * BaseVector::length_;
            const double vectorizedCost = SIMD_CMP_COST * numCmpVals *
                                          BaseVector::length_ / ksimd::batch<T>::size;
            return vectorizedCost <= setLookupCost;
        } else {
            // SIMD equality only applies to integral element types.
            return false;
        }
    }

    // Copies values and null flags for the selected 'rows' from 'source' into
    // this vector. When 'toSourceRow' is non-null, target row 'r' is copied
    // from source row 'toSourceRow[r]'; otherwise rows map 1:1. Dispatches on
    // the source encoding: flat, constant, or anything else via DecodedVector.
    template<typename T>
    void FlatVector<T>::copyValuesAndNulls(
        const BaseVector *source,
        const SelectivityVector &rows,
        const vector_size_t *toSourceRow) {
        // UNKNOWN-typed sources carry no values; mark all selected rows null.
        if (source->type_kind() == TypeKind::UNKNOWN) {
            auto *rawNulls = BaseVector::mutable_raw_nulls();
            rows.set_nulls(rawNulls);
            return;
        }

        source = source->loaded_vector();
        POLLUX_CHECK_EQ(BaseVector::type_kind(), source->type_kind());
        POLLUX_CHECK_GE(BaseVector::length_, rows.end());
        if (!toSourceRow) {
            POLLUX_CHECK_GE(source->size(), rows.end());
        }

        // Only materialize a writable nulls buffer when the source can actually
        // contain nulls; otherwise reuse whatever nulls buffer already exists
        // (possibly nullptr).
        uint64_t *rawNulls = const_cast<uint64_t *>(BaseVector::rawNulls_);
        if (source->may_have_nulls()) {
            rawNulls = BaseVector::mutable_raw_nulls();
        }

        // Allocate values buffer if not allocated yet. This may happen if vector
        // contains only null values.
        if (!values_) {
            mutableRawValues();
        }

        if (source->is_flat_encoding()) {
            auto *flatSource = source->as_unchecked<FlatVector<T> >();
            if (flatSource->values() == nullptr) {
                // All source values are null.
                rows.set_nulls(rawNulls);
                return;
            }

            if constexpr (std::is_same_v<T, bool>) {
                // Booleans are bit-packed into uint64_t words; copy bitwise.
                auto *rawValues = reinterpret_cast<uint64_t *>(rawValues_);
                auto *sourceValues = flatSource->template rawValues<uint64_t>();
                if (toSourceRow) {
                    rows.applyToSelected([&](auto row) {
                        auto sourceRow = toSourceRow[row];
                        POLLUX_DCHECK_GT(source->size(), sourceRow);
                        bits::setBit(rawValues, row, bits::isBitSet(sourceValues, sourceRow));
                    });
                } else {
                    const auto numBits = rows.countSelected();
                    if (numBits == rows.end() - rows.begin()) {
                        // Fast path for copying contiguous range of bits.
                        bits::copyBits(
                            sourceValues, rows.begin(), rawValues, rows.begin(), numBits);
                    } else {
                        rows.applyToSelected([&](auto row) {
                            bits::setBit(rawValues, row, bits::isBitSet(sourceValues, row));
                        });
                    }
                }
            } else {
                auto *sourceValues = flatSource->rawValues();
                if (toSourceRow) {
                    rows.applyToSelected([&](auto row) {
                        auto sourceRow = toSourceRow[row];
                        POLLUX_DCHECK_GT(source->size(), sourceRow);
                        rawValues_[row] = sourceValues[sourceRow];
                    });
                } else {
                    rows.applyToSelected(
                        [&](auto row) { rawValues_[row] = sourceValues[row]; });
                }
            }

            if (rawNulls) {
                const uint64_t *sourceNulls = source->raw_nulls();

                if (!sourceNulls) {
                    // Source has no nulls: mark all selected rows non-null.
                    rows.clear_nulls(rawNulls);
                } else {
                    if (toSourceRow) {
                        rows.applyToSelected([&](auto row) {
                            auto sourceRow = toSourceRow[row];
                            POLLUX_DCHECK_GT(source->size(), sourceRow);
                            bits::set_null(
                                rawNulls, row, bits::isBitNull(sourceNulls, sourceRow));
                        });
                    } else {
                        rows.copy_nulls(rawNulls, sourceNulls);
                    }
                }
            }
        } else if (source->is_constant_encoding()) {
            if (source->is_null_at(0)) {
                // Constant null source: all selected rows become null.
                BaseVector::add_nulls(rows);
                return;
            }
            auto constant = source->as_unchecked<ConstantVector<T> >();
            T value = constant->value_at(0);
            if constexpr (std::is_same_v<T, bool>) {
                // Set or clear the selected bits in bulk depending on 'value'.
                auto range = rows.asRange();
                auto *rawValues = reinterpret_cast<uint64_t *>(rawValues_);
                if (value) {
                    bits::orBits(rawValues, range.bits(), range.begin(), range.end());
                } else {
                    bits::andWithNegatedBits(
                        rawValues, range.bits(), range.begin(), range.end());
                }
            } else {
                rows.applyToSelected([&](int32_t row) { rawValues_[row] = value; });
            }

            rows.clear_nulls(rawNulls);
        } else {
            // Generic path: decode the source (dictionary/sequence/etc.) and
            // copy row by row.
            DecodedVector decoded(*source);
            if (toSourceRow == nullptr) {
                rows.applyToSelected([&](auto row) {
                    if (!decoded.is_null_at(row)) {
                        if constexpr (std::is_same_v<T, bool>) {
                            auto *rawValues = reinterpret_cast<uint64_t *>(rawValues_);
                            bits::setBit(rawValues, row, decoded.value_at<T>(row));
                        } else {
                            rawValues_[row] = decoded.value_at<T>(row);
                        }
                    }
                });

                if (rawNulls != nullptr) {
                    auto *sourceNulls = decoded.nulls();
                    if (sourceNulls == nullptr) {
                        rows.clear_nulls(rawNulls);
                    } else {
                        rows.copy_nulls(rawNulls, sourceNulls);
                    }
                }
            } else {
                rows.applyToSelected([&](auto row) {
                    const auto sourceRow = toSourceRow[row];
                    POLLUX_DCHECK_GT(source->size(), sourceRow);
                    if (!decoded.is_null_at(sourceRow)) {
                        if constexpr (std::is_same_v<T, bool>) {
                            auto *rawValues = reinterpret_cast<uint64_t *>(rawValues_);
                            bits::setBit(rawValues, row, decoded.value_at<T>(sourceRow));
                        } else {
                            rawValues_[row] = decoded.value_at<T>(sourceRow);
                        }

                        if (rawNulls) {
                            bits::clearNull(rawNulls, row);
                        }
                    } else {
                        // Null rows only occur when the source may have nulls,
                        // in which case rawNulls was made writable above.
                        bits::set_null(rawNulls, row);
                    }
                });
            }
        }
    }

    // Copies values and null flags from 'source' into this vector for each
    // (targetIndex, sourceIndex, count) entry in 'ranges'. Dispatches on the
    // source encoding: flat (bulk copy), constant (fill), or generic row copy.
    template<typename T>
    void FlatVector<T>::copy_ranges(
        const BaseVector *source,
        const melon::Range<const BaseVector::CopyRange *> &ranges) {
        // UNKNOWN-typed sources carry no values; mark the target ranges null.
        if (source->type_kind() == TypeKind::UNKNOWN) {
            BaseVector::set_nulls(BaseVector::mutable_raw_nulls(), ranges, true);
            return;
        }

        source = source->loaded_vector();
        POLLUX_CHECK_EQ(BaseVector::type_kind(), source->type_kind());

        if constexpr (std::is_same_v<T, StringView>) {
            auto leaf =
                    source->wrapped_vector()->as_unchecked<SimpleVector<StringView> >();
            if (BaseVector::pool_ != leaf->pool()) {
                // Different memory pools: deep-copy each string through set().
                apply_to_each_row(ranges, [&](auto targetIndex, auto sourceIndex) {
                    if (source->is_null_at(sourceIndex)) {
                        this->set_null(targetIndex, true);
                    } else {
                        this->set(
                            targetIndex, leaf->value_at(source->wrapped_index(sourceIndex)));
                    }
                });
                return;
            }

            // We copy referencing the storage of 'source'.
            acquireSharedStringBuffers(source);
        }

        const uint64_t *sourceRawNulls = source->raw_nulls();
        // Only materialize a writable nulls buffer when the source can actually
        // contain nulls; otherwise reuse whatever nulls buffer already exists
        // (possibly nullptr).
        uint64_t *rawNulls = const_cast<uint64_t *>(BaseVector::rawNulls_);
        if (source->may_have_nulls()) {
            rawNulls = BaseVector::mutable_raw_nulls();
        }

        // Allocate values buffer if not allocated yet. This may happen if vector
        // contains only null values.
        if (!values_) {
            mutableRawValues();
        }

        if (source->is_flat_encoding()) {
            auto *flatSource = source->as_unchecked<FlatVector<T> >();
            if (flatSource->values() == nullptr) {
                // All source values are null.
                BaseVector::set_nulls(BaseVector::mutable_raw_nulls(), ranges, true);
                return;
            }

            if constexpr (std::is_same_v<T, bool>) {
                // Booleans are bit-packed: copy whole bit ranges at a time.
                auto rawValues = reinterpret_cast<uint64_t *>(rawValues_);
                auto *sourceValues = flatSource->template rawValues<uint64_t>();
                apply_to_each_range(
                    ranges, [&](auto targetIndex, auto sourceIndex, auto count) {
                        bits::copyBits(
                            sourceValues, sourceIndex, rawValues, targetIndex, count);
                    });
            } else {
                const T *sourceValues = flatSource->rawValues();
                apply_to_each_range(
                    ranges, [&](auto targetIndex, auto sourceIndex, auto count) {
                        if constexpr (Buffer::is_pod_like_v<T>) {
                            // POD-like values can be block-copied.
                            memcpy(
                                &rawValues_[targetIndex],
                                &sourceValues[sourceIndex],
                                count * sizeof(T));
                        } else {
                            std::copy(
                                sourceValues + sourceIndex,
                                sourceValues + sourceIndex + count,
                                rawValues_ + targetIndex);
                        }
                    });
            }

            if (rawNulls) {
                if (sourceRawNulls) {
                    BaseVector::copy_nulls(rawNulls, sourceRawNulls, ranges);
                } else {
                    // Source has no nulls: clear null flags on the target ranges.
                    BaseVector::set_nulls(rawNulls, ranges, false);
                }
            }
        } else if (source->is_constant_encoding()) {
            if (source->is_null_at(0)) {
                // Constant null source: whole target ranges become null.
                BaseVector::set_nulls(rawNulls, ranges, true);
                return;
            }
            auto constant = source->as_unchecked<ConstantVector<T> >();
            T value = constant->value_at(0);
            if constexpr (std::is_same_v<T, bool>) {
                auto rawValues = reinterpret_cast<uint64_t *>(rawValues_);
                apply_to_each_range(
                    ranges, [&](auto targetIndex, auto /*sourceIndex*/, auto count) {
                        bits::fillBits(rawValues, targetIndex, targetIndex + count, value);
                    });
            } else {
                apply_to_each_row(ranges, [&](auto targetIndex, auto /*sourceIndex*/) {
                    rawValues_[targetIndex] = value;
                });
            }

            if (rawNulls) {
                BaseVector::set_nulls(rawNulls, ranges, false);
            }
        } else {
            // Generic path: row-by-row copy through the SimpleVector interface.
            auto *sourceVector = source->as_unchecked<SimpleVector<T> >();
            uint64_t *rawBoolValues = nullptr;
            if constexpr (std::is_same_v<T, bool>) {
                rawBoolValues = reinterpret_cast<uint64_t *>(rawValues_);
            }
            apply_to_each_row(ranges, [&](auto targetIndex, auto sourceIndex) {
                if (!source->is_null_at(sourceIndex)) {
                    auto sourceValue = sourceVector->value_at(sourceIndex);
                    if constexpr (std::is_same_v<T, bool>) {
                        bits::setBit(rawBoolValues, targetIndex, sourceValue);
                    } else {
                        rawValues_[targetIndex] = sourceValue;
                    }
                    if (rawNulls) {
                        bits::clearNull(rawNulls, targetIndex);
                    }
                } else {
                    // Null rows only occur when the source may have nulls, in
                    // which case rawNulls was made writable above.
                    bits::set_null(rawNulls, targetIndex);
                }
            });
        }
    }

    // Returns a zero-copy view of rows [offset, offset + length): nulls and
    // values buffers are sliced, string buffers are shared.
    template<typename T>
    VectorPtr FlatVector<T>::slice(vector_size_t offset, vector_size_t length)
    const {
        BufferPtr slicedValues;
        if (values_ != nullptr) {
            // The values buffer may be shorter than the vector when trailing
            // entries are null; clamp the slice to what is actually present.
            const auto availableValues = std::is_same_v<T, bool>
                                             ? 8 * values_->size()
                                             : values_->size() / sizeof(T);
            const auto sliceEnd =
                    std::min<vector_size_t>(availableValues, offset + length);
            if (sliceEnd >= offset) {
                slicedValues =
                        Buffer::slice<T>(values_, offset, sliceEnd - offset, this->pool_);
            }
        }
        return std::make_shared<FlatVector<T> >(
            this->pool_,
            this->type_,
            this->slice_nulls(offset, length),
            length,
            std::move(slicedValues),
            std::vector<BufferPtr>(stringBuffers_));
    }

    // Resizes the vector to 'newSize' rows, resizing the values buffer to
    // match. 'setNotNull' is forwarded to BaseVector::resize for the nulls
    // buffer. StringView vectors additionally (re)initialize new slots and
    // maintain the ascii-ness bookkeeping.
    template<typename T>
    void FlatVector<T>::resize(vector_size_t newSize, bool setNotNull) {
        const vector_size_t previousSize = BaseVector::length_;
        if (newSize == previousSize) {
            return;
        }
        BaseVector::resize(newSize, setNotNull);
        if (!values_) {
            // No values buffer to resize (e.g. an all-null vector).
            return;
        }

        if constexpr (std::is_same_v<T, StringView>) {
            resizeValues(newSize, StringView());
            if (newSize < previousSize) {
                // If we downsize, just invalidate ascii, because we might have become
                // 'all ascii' from 'not all ascii'.
                SimpleVector<StringView>::invalidateIsAscii();
            } else {
                // Properly init stringView objects. This is useful when vectors are
                // re-used where the size changes but not the capacity.
                // TODO: remove this when resizeValues() checks against size() instead of
                // capacity() when deciding to init values.
                auto stringViews = reinterpret_cast<StringView *>(rawValues_);
                for (auto index = previousSize; index < newSize; ++index) {
                    new(&stringViews[index]) StringView();
                }
                SimpleVector<StringView>::resizeIsAsciiIfNotEmpty(newSize, false);
            }
            if (newSize == 0) {
                // Shrink-to-empty: release all but one string buffer.
                keepAtMostOneStringBuffer();
            }
        } else {
            resizeValues(newSize, std::nullopt);
        }
    }

    // Makes the values (and, via the base class, nulls) buffers safely
    // writable for 'rows'. If the values buffer is shared or immutable, a new
    // one is allocated; values outside 'rows' are preserved since callers are
    // about to overwrite the selected rows anyway.
    template<typename T>
    void FlatVector<T>::ensure_writable(const SelectivityVector &rows) {
        auto newSize = std::max<vector_size_t>(rows.end(), BaseVector::length_);
        if (values_ && !values_->isMutable()) {
            BufferPtr newValues;
            if constexpr (std::is_same_v<T, StringView>) {
                // Make sure to initialize StringView values so they can be safely
                // accessed.
                newValues = AlignedBuffer::allocate<T>(newSize, BaseVector::pool_, T());
            } else {
                newValues = AlignedBuffer::allocate<T>(newSize, BaseVector::pool_);
            }

            if constexpr (std::is_same_v<T, bool>) {
                // Bit-packed booleans: copy the whole packed payload in one go.
                auto rawNewValues = newValues->asMutable<uint64_t>();
                std::memcpy(
                    rawNewValues,
                    rawValues_,
                    std::min(values_->size(), newValues->size()));
            } else {
                // Copy only rows NOT selected for writing; the selected rows
                // will be overwritten by the caller.
                auto rawNewValues = newValues->asMutable<T>();
                SelectivityVector rowsToCopy(BaseVector::length_);
                rowsToCopy.deselect(rows);
                rowsToCopy.applyToSelected(
                    [&](vector_size_t row) { rawNewValues[row] = rawValues_[row]; });
            }

            // Keep the string buffers even if multiply referenced. These are
            // append-only and are written to in FlatVector::set which calls
            // getBufferWithSpace which allocates a new buffer if existing buffers
            // are multiply-referenced.

            // TODO Optimization: check and remove string buffers not referenced by
            // rowsToCopy

            values_ = std::move(newValues);
            rawValues_ = values_->asMutable<T>();
        }

        BaseVector::ensure_writable(rows);
    }

    // Prepares this vector for recycling. The values buffer is retained only
    // when it can be written in place (mutable, i.e. not shared or a view);
    // otherwise it is released so the next write allocates a fresh one.
    template<typename T>
    void FlatVector<T>::prepare_for_reuse() {
        BaseVector::prepare_for_reuse();

        if (values_ != nullptr && !values_->isMutable()) {
            rawValues_ = nullptr;
            values_ = nullptr;
        }
    }

    // Resizes the values buffer to hold 'newSize' elements, optionally filling
    // newly exposed positions with 'initialValue'. When the current buffer is
    // absent or a view, a new buffer is allocated and the old contents copied.
    template<typename T>
    void FlatVector<T>::resizeValues(
        vector_size_t newSize,
        const std::optional<T> &initialValue) {
        // TODO: change this to isMutable(). See
        // https://github.com/facebookincubator/pollux/issues/6562.
        if (values_ && !values_->isView()) {
            // In-place path: reallocate only if capacity is insufficient,
            // otherwise just adjust the logical size.
            const uint64_t newByteSize = BaseVector::byteSize<T>(newSize);
            if (values_->capacity() < newByteSize) {
                AlignedBuffer::reallocate<T>(&values_, newSize, initialValue);
            } else {
                values_->setSize(newByteSize);
            }
            rawValues_ = values_->asMutable<T>();
            return;
        }
        BufferPtr newValues =
                AlignedBuffer::allocate<T>(newSize, BaseVector::pool_, initialValue);

        if (values_) {
            if constexpr (Buffer::is_pod_like_v<T>) {
                auto dst = newValues->asMutable<T>();
                auto src = values_->as<T>();
                auto len = std::min(values_->size(), newValues->size());
                memcpy(dst, src, len);
            } else {
                const vector_size_t previousSize = BaseVector::length_;
                // Bug fix: read from the existing buffer ('values_'), not from
                // the freshly allocated 'newValues'. The previous code copied
                // the new buffer onto itself, discarding all prior contents on
                // the non-POD path.
                const T *rawOldValues = values_->as<T>();
                auto *rawNewValues = newValues->asMutable<T>();
                const auto len = std::min<vector_size_t>(newSize, previousSize);
                for (vector_size_t row = 0; row < len; ++row) {
                    rawNewValues[row] = rawOldValues[row];
                }
            }
        }
        values_ = std::move(newValues);
        rawValues_ = values_->asMutable<T>();
    }

    // Specialization for bool: values are bit-packed into uint64_t words, so
    // resizing works at bit granularity and newly exposed bits must be filled
    // explicitly even when the underlying byte size does not change.
    template<>
    inline void FlatVector<bool>::resizeValues(
        vector_size_t newSize,
        const std::optional<bool> &initialValue) {
        // TODO: change this to isMutable(). See
        // https://github.com/facebookincubator/pollux/issues/6562.
        if (values_ && !values_->isView()) {
            const uint64_t newByteSize = BaseVector::byteSize<bool>(newSize);
            if (values_->size() < newByteSize) {
                AlignedBuffer::reallocate<bool>(&values_, newSize, initialValue);
            } else {
                values_->setSize(newByteSize);
            }
            // ensure that the newly added positions have the right initial value for
            // the case where changes in size don't result in change in the size of
            // the underlying buffer.
            if (initialValue.has_value() && length_ < newSize) {
                auto rawData = values_->asMutable<uint64_t>();
                bits::fillBits(rawData, length_, newSize, initialValue.value());
            }
            rawValues_ = values_->asMutable<bool>();
            return;
        }
        // Buffer absent or a view: allocate a new one and copy the old bytes.
        BufferPtr newValues =
                AlignedBuffer::allocate<bool>(newSize, BaseVector::pool_, initialValue);

        if (values_) {
            auto dst = newValues->asMutable<char>();
            auto src = values_->as<char>();
            auto len = std::min(values_->size(), newValues->size());
            memcpy(dst, src, len);
        }
        values_ = std::move(newValues);
        rawValues_ = values_->asMutable<bool>();
    }
} // namespace kumo::pollux
