// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

#include <nebula/array/data.h>
#include <nebula/bits/bit_block_counter.h>
#include <nebula/bits/bit_run_reader.h>
#include <nebula/bits/bit_util.h>
#include <nebula/bits/bitmap_ops.h>
#include <turbo/base/macros.h>

#ifndef NEBULA_COMPILER_ASSUME
#if defined(__GNUC__)  // GCC and compatible compilers (clang, Intel ICC)
#if defined(__clang__)  // clang-specific
#define NEBULA_COMPILER_ASSUME(expr) __builtin_assume(expr)
#else  // GCC-specific
#if __GNUC__ >= 13
#define NEBULA_COMPILER_ASSUME(expr) __attribute__((assume(expr)))
#else
// GCC does not have a built-in assume intrinsic before GCC 13, so we use an
// if statement and __builtin_unreachable() to achieve the same effect [2].
// Unlike clang's __builtin_assume and C++23's [[assume(expr)]], this form
// won't make GCC warn about side effects in the expression. Make sure expr
// is side-effect free when supporting GCC versions before 13 (Jan-2024);
// otherwise clang/MSVC builds, which do diagnose side effects, can fail in
// CI while GCC builds pass.
#define NEBULA_COMPILER_ASSUME(expr) \
  if (expr) {                       \
  } else {                          \
    __builtin_unreachable();        \
  }
#endif  // __GNUC__ >= 13
#endif
#elif defined(_MSC_VER)  // MSVC
#define NEBULA_COMPILER_ASSUME(expr) __assume(expr)
#else
#define NEBULA_COMPILER_ASSUME(expr)
#endif
#endif  // NEBULA_COMPILER_ASSUME

// Implementation helpers for kernels that need to load/gather fixed-width
// data from multiple, arbitrary indices.
//
// https://en.wikipedia.org/wiki/Gather/scatter_(vector_addressing)

namespace nebula::internal {

    // CRTP [1] base class for Gather that provides a gathering loop in terms of
    // Write*() methods that must be implemented by the derived class.
    //
    // [1] https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern
    template<class GatherImpl>
    class GatherBaseCRTP {
    public:
        // Output offset is not supported by Gather and idx is supposed to have offset
        // pre-applied. idx_validity parameters on functions can use the offset they
        // carry to read the validity bitmap as bitmaps can't have pre-applied offsets
        // (they might not align to byte boundaries).

        GatherBaseCRTP() = default;
        TURBO_DISALLOW_COPY_AND_ASSIGN(GatherBaseCRTP);
        TURBO_DEFAULT_MOVE_AND_ASSIGN(GatherBaseCRTP);

    protected:
        // Gather assuming every index and every source value is valid: calls the
        // derived class's WriteValue() once per position in [0, idx_length).
        //
        // \return idx_length (every output position is valid).
        TURBO_FORCE_INLINE int64_t ExecuteNoNulls(int64_t idx_length) {
            auto *self = static_cast<GatherImpl *>(this);
            for (int64_t position = 0; position < idx_length; position++) {
                self->WriteValue(position);
            }
            return idx_length;
        }

        // See derived Gather classes below for the meaning of the parameters, pre and
        // post-conditions.
        //
        // src_validity is not necessarily the source of the values that are being
        // gathered (e.g. the source could be a nested fixed-size list array and the
        // values being gathered are from the innermost buffer), so the ArraySpan is
        // used solely to check for nulls in the source values and nothing else.
        //
        // idx_length is the number of elements in idx and consequently the number of
        // bits that might be written to out_is_valid. Member `Write*()` functions will
        // be called with positions from 0 to idx_length - 1.
        //
        // If `kOutputIsZeroInitialized` is true, then `WriteZero()` or `WriteZeroSegment()`
        // doesn't have to be called for resulting null positions. A position is
        // considered null if either the index or the source value is null at that
        // position.
        //
        // \return the number of valid (non-null) positions gathered.
        template<bool kOutputIsZeroInitialized, typename IndexCType>
        TURBO_FORCE_INLINE int64_t ExecuteWithNulls(const ArraySpan &src_validity,
                                                    int64_t idx_length, const IndexCType *idx,
                                                    const ArraySpan &idx_validity,
                                                    uint8_t *out_is_valid) {
            auto *self = static_cast<GatherImpl *>(this);
            // Iterate over runs of set/unset bits in the index validity bitmap so that
            // the all-valid and all-null cases can take fast block-wise paths.
            OptionalBitBlockCounter indices_bit_counter(idx_validity.buffers[0].data,
                                                        idx_validity.offset, idx_length);
            int64_t position = 0;
            int64_t valid_count = 0;
            while (position < idx_length) {
                BitBlockCount block = indices_bit_counter.NextBlock();
                if (!src_validity.may_have_nulls()) {
                    // Source values are never null, so things are easier
                    valid_count += block.popcount;
                    if (block.popcount == block.length) {
                        // Fastest path: neither source values nor index nulls
                        bit_util::SetBitsTo(out_is_valid, position, block.length, true);
                        for (int64_t i = 0; i < block.length; ++i) {
                            self->WriteValue(position);
                            ++position;
                        }
                    } else if (block.popcount > 0) {
                        // Slow path: some indices but not all are null
                        for (int64_t i = 0; i < block.length; ++i) {
                            NEBULA_COMPILER_ASSUME(idx_validity.buffers[0].data != nullptr);
                            if (idx_validity.is_valid(position)) {
                                // index is not null
                                bit_util::SetBit(out_is_valid, position);
                                self->WriteValue(position);
                            } else if constexpr (!kOutputIsZeroInitialized) {
                                self->WriteZero(position);
                            }
                            ++position;
                        }
                    } else {
                        // The whole block of indices is null. Per the contract above, the
                        // zeroing is redundant when the output is known zero-initialized,
                        // so skip it at compile time in that case.
                        if constexpr (!kOutputIsZeroInitialized) {
                            self->WriteZeroSegment(position, block.length);
                        }
                        position += block.length;
                    }
                } else {
                    // Source values may be null, so we must do random access into src_validity
                    if (block.popcount == block.length) {
                        // Faster path: indices are not null but source values may be
                        for (int64_t i = 0; i < block.length; ++i) {
                            NEBULA_COMPILER_ASSUME(src_validity.buffers[0].data != nullptr);
                            if (src_validity.is_valid(idx[position])) {
                                // value is not null
                                self->WriteValue(position);
                                bit_util::SetBit(out_is_valid, position);
                                ++valid_count;
                            } else if constexpr (!kOutputIsZeroInitialized) {
                                self->WriteZero(position);
                            }
                            ++position;
                        }
                    } else if (block.popcount > 0) {
                        // Slow path: some but not all indices are null. Since we are doing
                        // random access in general we have to check the value nullness one by
                        // one.
                        for (int64_t i = 0; i < block.length; ++i) {
                            NEBULA_COMPILER_ASSUME(src_validity.buffers[0].data != nullptr);
                            NEBULA_COMPILER_ASSUME(idx_validity.buffers[0].data != nullptr);
                            if (idx_validity.is_valid(position) && src_validity.is_valid(idx[position])) {
                                // index is not null && value is not null
                                self->WriteValue(position);
                                bit_util::SetBit(out_is_valid, position);
                                ++valid_count;
                            } else if constexpr (!kOutputIsZeroInitialized) {
                                self->WriteZero(position);
                            }
                            ++position;
                        }
                    } else {
                        if constexpr (!kOutputIsZeroInitialized) {
                            self->WriteZeroSegment(position, block.length);
                        }
                        position += block.length;
                    }
                }
            }
            return valid_count;
        }
    };

    // A gather primitive for primitive fixed-width types with an integral byte width.
    // If `kWithFactor` is true, the actual width is a runtime multiple of
    // `kValueWidthInBits` (this can be useful for fixed-size list inputs and other
    // input types with unusual byte widths that don't deserve value specialization).
    template<int kValueWidthInBits, typename IndexCType, bool kWithFactor>
    class Gather : public GatherBaseCRTP<Gather<kValueWidthInBits, IndexCType, kWithFactor>> {
    public:
        static_assert(kValueWidthInBits >= 0 && kValueWidthInBits % 8 == 0);
        static constexpr int kValueWidth = kValueWidthInBits / 8;

    private:
        const int64_t src_length_;  // number of elements of kValueWidth bytes in src_
        const uint8_t *src_;
        const int64_t idx_length_;  // number of IndexCType elements in idx_
        const IndexCType *idx_;
        uint8_t *out_;
        int64_t factor_;

    public:
        // Copy the (possibly factor_-scaled) kValueWidth-byte value selected by
        // idx_[position] from src_ into slot `position` of out_.
        void WriteValue(int64_t position) {
            if constexpr (kWithFactor) {
                const int64_t scaled_factor = kValueWidth * factor_;
                std::memcpy(out_ + position * scaled_factor, src_ + idx_[position] * scaled_factor,
                            scaled_factor);
            } else {
                std::memcpy(out_ + position * kValueWidth, src_ + idx_[position] * kValueWidth,
                            kValueWidth);
            }
        }

        // Zero-fill slot `position` of out_.
        void WriteZero(int64_t position) {
            if constexpr (kWithFactor) {
                const int64_t scaled_factor = kValueWidth * factor_;
                std::memset(out_ + position * scaled_factor, 0, scaled_factor);
            } else {
                std::memset(out_ + position * kValueWidth, 0, kValueWidth);
            }
        }

        // Zero-fill `length` consecutive slots of out_ starting at slot `position`.
        void WriteZeroSegment(int64_t position, int64_t length) {
            if constexpr (kWithFactor) {
                const int64_t scaled_factor = kValueWidth * factor_;
                std::memset(out_ + position * scaled_factor, 0, length * scaled_factor);
            } else {
                std::memset(out_ + position * kValueWidth, 0, length * kValueWidth);
            }
        }

    public:
        // Byte-based gathers don't support a source offset: it must be pre-applied to
        // src by the caller, hence the `zero_src_offset` parameter name and assert.
        Gather(int64_t src_length, const uint8_t *src, int64_t zero_src_offset,
               int64_t idx_length, const IndexCType *idx, uint8_t *out, int64_t factor)
                : src_length_(src_length),
                  src_(src),
                  idx_length_(idx_length),
                  idx_(idx),
                  out_(out),
                  factor_(factor) {
            assert(zero_src_offset == 0);
            assert(src && idx && out);
            assert((kWithFactor || factor == 1) &&
                   "When kWithFactor is false, the factor is assumed to be 1 at compile time");
        }

        // Gather without any null checks. \return the number of elements gathered.
        TURBO_FORCE_INLINE int64_t execute() { return this->ExecuteNoNulls(idx_length_); }

        /// \pre If kOutputIsZeroInitialized, then this->out_ has to be zero initialized.
        /// \pre Bits in out_is_valid have to always be zero initialized.
        /// \post The bits for the valid elements (and only those) are set in out_is_valid.
        /// \post If !kOutputIsZeroInitialized, then positions in this->_out containing null
        ///       elements have 0s written to them. This might be less efficient than
        ///       zero-initializing first and calling this->execute() afterwards.
        /// \return The number of valid elements in out.
        template<bool kOutputIsZeroInitialized = false>
        TURBO_FORCE_INLINE int64_t execute(const ArraySpan &src_validity,
                                           const ArraySpan &idx_validity,
                                           uint8_t *out_is_valid) {
            assert(src_length_ == src_validity.length);
            assert(idx_length_ == idx_validity.length);
            assert(out_is_valid);
            return this->template ExecuteWithNulls<kOutputIsZeroInitialized>(
                    src_validity, idx_length_, idx_, idx_validity, out_is_valid);
        }
    };

    // Bit-level Gather specialization used for boolean inputs. The non-trivial
    // factor supported by the generic byte-based version is not available here.
    template<typename IndexCType>
    class Gather</*kValueWidthInBits=*/1, IndexCType, /*kWithFactor=*/false>
            : public GatherBaseCRTP<Gather<1, IndexCType, false>> {
    private:
        const int64_t src_length_;  // number of bits readable from src_ past src_offset_
        const uint8_t *src_;        // boolean values, bit-packed
        const int64_t src_offset_;  // offset into src_, in bits
        const int64_t idx_length_;  // element count of idx_
        const IndexCType *idx_;
        uint8_t *out_;  // bit-packed output buffer

    public:
        // Read the source bit selected by idx_[position] and store it at bit
        // `position` of the output.
        void WriteValue(int64_t position) {
            const bool bit = bit_util::get_bit(src_, src_offset_ + idx_[position]);
            bit_util::SetBitTo(out_, position, bit);
        }

        // Clear the output bit at `position`.
        void WriteZero(int64_t position) { bit_util::ClearBit(out_, position); }

        // Clear `run_length` consecutive output bits starting at `position`.
        void WriteZeroSegment(int64_t position, int64_t run_length) {
            bit_util::SetBitsTo(out_, position, run_length, false);
        }

        Gather(int64_t src_length, const uint8_t *src, int64_t src_offset, int64_t idx_length,
               const IndexCType *idx, uint8_t *out, int64_t factor)
                : src_length_(src_length),
                  src_(src),
                  src_offset_(src_offset),
                  idx_length_(idx_length),
                  idx_(idx),
                  out_(out) {
            assert(src && idx && out);
            assert(factor == 1 &&
                   "factor != 1 is not supported when Gather is used to gather bits/booleans");
        }

        // Gather every position, assuming neither indices nor values are null.
        TURBO_FORCE_INLINE int64_t execute() { return this->ExecuteNoNulls(idx_length_); }

        /// Gather while tracking validity of indices and source values.
        ///
        /// \pre out_ must be zero initialized when kOutputIsZeroInitialized is true.
        /// \pre Bits in out_is_valid must always start zero initialized.
        /// \post out_is_valid has bits set for exactly the valid output elements.
        /// \post When !kOutputIsZeroInitialized, null output positions are explicitly
        ///       cleared, which may cost more than zero-initializing up front and then
        ///       calling the no-argument execute().
        /// \return the number of valid elements written to out.
        template<bool kOutputIsZeroInitialized = false>
        TURBO_FORCE_INLINE int64_t execute(const ArraySpan &src_validity,
                                           const ArraySpan &idx_validity,
                                           uint8_t *out_is_valid) {
            assert(src_length_ == src_validity.length);
            assert(idx_length_ == idx_validity.length);
            assert(out_is_valid);
            return this->template ExecuteWithNulls<kOutputIsZeroInitialized>(
                    src_validity, idx_length_, idx_, idx_validity, out_is_valid);
        }
    };

}  // namespace nebula::internal
