// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/util/byte_size.h>

#include <cstdint>
#include <unordered_set>

#include <nebula/core/array.h>
#include <nebula/array/builder_primitive.h>
#include <nebula/core/buffer.h>
#include <nebula/core/chunked_array.h>
#include <nebula/core/record_batch.h>
#include <nebula/core/table.h>
#include <turbo/log/logging.h>
#include <nebula/util/ree_util.h>
#include <nebula/core/visit_type_inline.h>

namespace nebula::util {


    namespace {

        // Recursively accumulate the physical size of every distinct buffer
        // reachable from `array_data`: its own buffers, all child arrays, and the
        // dictionary (if any).  `seen_buffers` tracks buffer data pointers already
        // counted so a buffer shared between arrays contributes only once.
        int64_t DoTotalBufferSize(const ArrayData &array_data,
                                  std::unordered_set<const uint8_t *> *seen_buffers) {
            int64_t total = 0;
            for (const auto &buf: array_data.buffers) {
                if (!buf) {
                    continue;
                }
                // insert(...).second is true only the first time we see this pointer
                if (seen_buffers->insert(buf->data()).second) {
                    total += buf->size();
                }
            }
            for (const auto &child: array_data.child_data) {
                total += DoTotalBufferSize(*child, seen_buffers);
            }
            if (array_data.dictionary != nullptr) {
                total += DoTotalBufferSize(*array_data.dictionary, seen_buffers);
            }
            return total;
        }

        // Array overload: forwards to the ArrayData implementation.
        int64_t DoTotalBufferSize(const Array &array,
                                  std::unordered_set<const uint8_t *> *seen_buffers) {
            const auto &data = *array.data();
            return DoTotalBufferSize(data, seen_buffers);
        }

        // ChunkedArray overload: sums across all chunks, deduplicating shared
        // buffers via `seen_buffers`.
        int64_t DoTotalBufferSize(const ChunkedArray &chunked_array,
                                  std::unordered_set<const uint8_t *> *seen_buffers) {
            int64_t total = 0;
            for (const auto &piece: chunked_array.chunks()) {
                total += DoTotalBufferSize(*piece, seen_buffers);
            }
            return total;
        }

        // RecordBatch overload: sums across all columns, deduplicating shared
        // buffers via `seen_buffers`.
        int64_t DoTotalBufferSize(const RecordBatch &record_batch,
                                  std::unordered_set<const uint8_t *> *seen_buffers) {
            int64_t total = 0;
            for (const auto &col: record_batch.columns()) {
                total += DoTotalBufferSize(*col, seen_buffers);
            }
            return total;
        }

        // Table overload: sums across all (chunked) columns, deduplicating shared
        // buffers via `seen_buffers`.
        int64_t DoTotalBufferSize(const Table &table,
                                  std::unordered_set<const uint8_t *> *seen_buffers) {
            int64_t total = 0;
            for (const auto &col: table.columns()) {
                total += DoTotalBufferSize(*col, seen_buffers);
            }
            return total;
        }

    }  // namespace

    // Total physical size in bytes of all distinct buffers referenced by
    // `array_data` (children and dictionary included; shared buffers counted once).
    int64_t TotalBufferSize(const ArrayData &array_data) {
        std::unordered_set<const uint8_t *> visited;
        return DoTotalBufferSize(array_data, &visited);
    }

    // Total physical size in bytes of all distinct buffers referenced by `array`.
    int64_t TotalBufferSize(const Array &array) {
        return TotalBufferSize(*array.data());
    }

    // Total physical size in bytes of all distinct buffers referenced by any chunk
    // of `chunked_array` (shared buffers counted once across chunks).
    int64_t TotalBufferSize(const ChunkedArray &chunked_array) {
        std::unordered_set<const uint8_t *> visited;
        return DoTotalBufferSize(chunked_array, &visited);
    }

    // Total physical size in bytes of all distinct buffers referenced by any
    // column of `record_batch` (shared buffers counted once across columns).
    int64_t TotalBufferSize(const RecordBatch &record_batch) {
        std::unordered_set<const uint8_t *> visited;
        return DoTotalBufferSize(record_batch, &visited);
    }

    // Total physical size in bytes of all distinct buffers referenced by any
    // column of `table` (shared buffers counted once across columns).
    int64_t TotalBufferSize(const Table &table) {
        std::unordered_set<const uint8_t *> visited;
        return DoTotalBufferSize(table, &visited);
    }

    namespace {

        // Type visitor that collects, for a single ArrayData, the byte ranges its
        // logical window [offset, offset + length) actually references inside the
        // underlying buffers.  For each referenced slice it appends one triple to
        // the three builders: `start` = the buffer's base data pointer reinterpreted
        // as an integer, `offset` = first referenced byte within that buffer,
        // `length` = number of referenced bytes.  Children, dictionaries, and
        // extension storage are visited recursively.
        struct GetByteRangesArray {
            const ArrayData &input;        // array whose referenced ranges are collected
            int64_t offset;                // logical element offset into `input`
            int64_t length;                // number of logical elements considered
            UInt64Builder *range_starts;   // out: buffer base addresses (as uint64)
            UInt64Builder *range_offsets;  // out: first referenced byte in each buffer
            UInt64Builder *range_lengths;  // out: referenced byte count per range

            // Record the referenced slice of a validity bitmap (1 bit per element).
            // No-op when the bitmap buffer is absent.
            turbo::Status VisitBitmap(const std::shared_ptr<Buffer> &buffer) const {
                if (buffer) {
                    uint64_t data_start = reinterpret_cast<uint64_t>(buffer->data());
                    TURBO_RETURN_NOT_OK(range_starts->append(data_start));
                    // Bit offset rounded down to the containing byte; length covers
                    // every byte touched by [offset, offset + length) bits.
                    TURBO_RETURN_NOT_OK(range_offsets->append(bit_util::RoundDown(offset, 8) / 8));
                    TURBO_RETURN_NOT_OK(range_lengths->append(bit_util::CoveringBytes(offset, length)));
                }
                return turbo::OkStatus();
            }

            // Record the slice of a fixed-width values buffer covered by
            // [offset, offset + length) elements of `type`.  Works in bits so
            // bit-packed widths (e.g. booleans) are handled correctly.
            turbo::Status VisitFixedWidthArray(const Buffer &buffer, const FixedWidthType &type) const {
                uint64_t data_start = reinterpret_cast<uint64_t>(buffer.data());
                uint64_t offset_bits = offset * type.bit_width();
                uint64_t offset_bytes = bit_util::RoundDown(static_cast<int64_t>(offset_bits), 8) / 8;
                uint64_t end_byte =
                        bit_util::RoundUp(static_cast<int64_t>(offset_bits + (length * type.bit_width())),
                                          8) /
                        8;
                uint64_t length_bytes = (end_byte - offset_bytes);
                TURBO_RETURN_NOT_OK(range_starts->append(data_start));
                TURBO_RETURN_NOT_OK(range_offsets->append(offset_bytes));
                return range_lengths->append(length_bytes);
            }

            // Fixed-width arrays: validity bitmap + values buffer.  When the array
            // is dictionary-encoded the values buffer holds the indices and the
            // dictionary values are visited recursively as well.
            turbo::Status Visit(const FixedWidthType &type) const {
                static_assert(sizeof(uint8_t *) <= sizeof(uint64_t),
                              "Undefined behavior if pointer larger than uint64_t");
                TURBO_RETURN_NOT_OK(VisitBitmap(input.buffers[0]));
                TURBO_RETURN_NOT_OK(VisitFixedWidthArray(*input.buffers[1], type));
                if (input.dictionary) {
                    // This is slightly imprecise because we always assume the entire dictionary is
                    // referenced.  If this array has an offset it may only be referencing a portion of
                    // the dictionary
                    GetByteRangesArray dict_visitor{*input.dictionary,
                                                    input.dictionary->offset,
                                                    input.dictionary->length,
                                                    range_starts,
                                                    range_offsets,
                                                    range_lengths};
                    return visit_type_inline(*input.dictionary->type, &dict_visitor);
                }
                return turbo::OkStatus();
            }

            // Null arrays reference no buffers.
            turbo::Status Visit(const NullType &type) const { return turbo::OkStatus(); }

            // Binary/string arrays: validity bitmap, the referenced slice of the
            // offsets buffer, and the span of value bytes those offsets delimit.
            template<typename BaseBinaryType>
            turbo::Status VisitBaseBinary(const BaseBinaryType &type) const {
                using offset_type = typename BaseBinaryType::offset_type;
                TURBO_RETURN_NOT_OK(VisitBitmap(input.buffers[0]));

                // NOTE(review): offsets[length] is read below, so length + 1 offset
                // entries are referenced but only `length` are recorded here —
                // confirm this under-count is intentional.
                const Buffer &offsets_buffer = *input.buffers[1];
                TURBO_RETURN_NOT_OK(
                        range_starts->append(reinterpret_cast<uint64_t>(offsets_buffer.data())));
                TURBO_RETURN_NOT_OK(range_offsets->append(sizeof(offset_type) * offset));
                TURBO_RETURN_NOT_OK(range_lengths->append(sizeof(offset_type) * length));

                // Values referenced are [offsets[0], offsets[length]) relative to
                // the values buffer start.
                const offset_type *offsets = input.get_values<offset_type>(1, offset);
                const Buffer &values = *input.buffers[2];
                offset_type start = offsets[0];
                offset_type end = offsets[length];
                TURBO_RETURN_NOT_OK(range_starts->append(reinterpret_cast<uint64_t>(values.data())));
                TURBO_RETURN_NOT_OK(range_offsets->append(static_cast<uint64_t>(start)));
                return range_lengths->append(static_cast<uint64_t>(end - start));
            }

            turbo::Status Visit(const BinaryType &type) const { return VisitBaseBinary(type); }

            turbo::Status Visit(const LargeBinaryType &type) const { return VisitBaseBinary(type); }

            // List arrays: validity bitmap, the referenced slice of the offsets
            // buffer, then recurse into the child values over the element range the
            // offsets delimit.
            template<typename BaseListType>
            turbo::Status VisitBaseList(const BaseListType &type) const {
                using offset_type = typename BaseListType::offset_type;
                TURBO_RETURN_NOT_OK(VisitBitmap(input.buffers[0]));

                const Buffer &offsets_buffer = *input.buffers[1];
                TURBO_RETURN_NOT_OK(
                        range_starts->append(reinterpret_cast<uint64_t>(offsets_buffer.data())));
                TURBO_RETURN_NOT_OK(range_offsets->append(sizeof(offset_type) * offset));
                TURBO_RETURN_NOT_OK(range_lengths->append(sizeof(offset_type) * length));

                // Child elements referenced are [offsets[0], offsets[length]).
                const offset_type *offsets = input.get_values<offset_type>(1, offset);
                int64_t start = static_cast<int64_t>(offsets[0]);
                int64_t end = static_cast<int64_t>(offsets[length]);
                GetByteRangesArray child{*input.child_data[0], start, end - start,
                                         range_starts, range_offsets, range_lengths};
                return visit_type_inline(*type.get_value_type(), &child);
            }

            turbo::Status Visit(const ListType &type) const { return VisitBaseList(type); }

            turbo::Status Visit(const LargeListType &type) const { return VisitBaseList(type); }

            // Fixed-size lists have no offsets buffer; the child range is a direct
            // scaling of this array's window by the list size.
            turbo::Status Visit(const FixedSizeListType &type) const {
                TURBO_RETURN_NOT_OK(VisitBitmap(input.buffers[0]));
                GetByteRangesArray child{*input.child_data[0],
                                         offset * type.list_size(),
                                         length * type.list_size(),
                                         range_starts,
                                         range_offsets,
                                         range_lengths};
                return visit_type_inline(*type.get_value_type(), &child);
            }

            // Structs: recurse into every field over the same logical window,
            // shifted by each child's own offset.
            // NOTE(review): the parent validity bitmap (buffers[0]) is not recorded
            // here — confirm whether that is intentional.
            turbo::Status Visit(const StructType &type) const {
                for (int i = 0; i < type.num_fields(); i++) {
                    GetByteRangesArray child{*input.child_data[i],
                                             offset + input.child_data[i]->offset,
                                             length,
                                             range_starts,
                                             range_offsets,
                                             range_lengths};
                    TURBO_RETURN_NOT_OK(visit_type_inline(*type.field(i)->type(), &child));
                }
                return turbo::OkStatus();
            }

            // Dense unions: record the types and offsets buffers, then scan the
            // type codes to determine, per child, how many elements precede the
            // window (child offset) and how many fall inside it (child length).
            turbo::Status Visit(const DenseUnionType &type) const {
                // Skip validity map for DenseUnionType
                // Types buffer is always int8
                TURBO_RETURN_NOT_OK(VisitFixedWidthArray(
                        *input.buffers[1], *std::dynamic_pointer_cast<FixedWidthType>(int8())));
                // Offsets buffer is always int32
                TURBO_RETURN_NOT_OK(VisitFixedWidthArray(
                        *input.buffers[2], *std::dynamic_pointer_cast<FixedWidthType>(int32())));

                // We have to loop through the types buffer to figure out the correct
                // offset / length being referenced in the child arrays
                std::vector<int64_t> lengths_per_type(type.type_codes().size());
                std::vector<int64_t> offsets_per_type(type.type_codes().size());
                const int8_t *type_codes = input.get_values<int8_t>(1, 0);
                // Elements before the window contribute to each child's offset.
                for (const int8_t *it = type_codes; it != type_codes + offset; it++) {
                    DKCHECK_NE(type.child_ids()[static_cast<std::size_t>(*it)],
                              UnionType::kInvalidChildId);
                    offsets_per_type[type.child_ids()[static_cast<std::size_t>(*it)]]++;
                }
                // Elements inside the window contribute to each child's length.
                for (const int8_t *it = type_codes + offset; it != type_codes + offset + length;
                     it++) {
                    DKCHECK_NE(type.child_ids()[static_cast<std::size_t>(*it)],
                              UnionType::kInvalidChildId);
                    lengths_per_type[type.child_ids()[static_cast<std::size_t>(*it)]]++;
                }

                for (int i = 0; i < type.num_fields(); i++) {
                    GetByteRangesArray child{
                            *input.child_data[i], offsets_per_type[i] + input.child_data[i]->offset,
                            lengths_per_type[i], range_starts,
                            range_offsets, range_lengths};
                    TURBO_RETURN_NOT_OK(visit_type_inline(*type.field(i)->type(), &child));
                }

                return turbo::OkStatus();
            }

            // Sparse unions: record the types buffer, then recurse into every child
            // over the same logical window (children are parallel to the parent).
            turbo::Status Visit(const SparseUnionType &type) const {
                // Skip validity map for SparseUnionType
                // Types buffer is always int8
                TURBO_RETURN_NOT_OK(VisitFixedWidthArray(
                        *input.buffers[1], *std::dynamic_pointer_cast<FixedWidthType>(int8())));

                for (int i = 0; i < type.num_fields(); i++) {
                    GetByteRangesArray child{*input.child_data[i],
                                             offset + input.child_data[i]->offset,
                                             length,
                                             range_starts,
                                             range_offsets,
                                             range_lengths};
                    TURBO_RETURN_NOT_OK(visit_type_inline(*type.field(i)->type(), &child));
                }

                return turbo::OkStatus();
            }

            // Run-end encoded arrays: translate the logical window to the physical
            // run range, then recurse into the run_ends and values children.
            turbo::Status Visit(const RunEndEncodedType &type) const {
                auto [phys_offset, phys_length] = ree_util::FindPhysicalRange(input, offset, length);
                for (int i = 0; i < type.num_fields(); i++) {
                    GetByteRangesArray child{*input.child_data[i],
                            /*offset=*/input.child_data[i]->offset + phys_offset,
                            /*length=*/phys_length,
                                             range_starts,
                                             range_offsets,
                                             range_lengths};
                    TURBO_RETURN_NOT_OK(visit_type_inline(*type.field(i)->type(), &child));
                }
                return turbo::OkStatus();
            }

            // Extension arrays delegate to their storage type over the same window.
            turbo::Status Visit(const ExtensionType &extension_type) const {
                GetByteRangesArray storage{input, offset, length,
                                           range_starts, range_offsets, range_lengths};
                return visit_type_inline(*extension_type.storage_type(), &storage);
            }

            // Fallback for unsupported types.
            turbo::Status Visit(const DataType &type) const {
                return turbo::failed_precondition_error("Extracting byte ranges not supported for type ",
                                                        type.to_string());
            }

            // Schema of the result array: struct<start: uint64, offset: uint64, length: uint64>.
            static std::shared_ptr<DataType> RangesType() {
                return STRUCT(
                        {field("start", uint64()), field("offset", uint64()), field("length", uint64())});
            }

            // Finalize the three builders into a StructArray of range triples.
            turbo::Result<std::shared_ptr<Array>> MakeRanges() const {
                std::shared_ptr<Array> range_starts_arr, range_offsets_arr, range_lengths_arr;
                TURBO_RETURN_NOT_OK(range_starts->finish(&range_starts_arr));
                TURBO_RETURN_NOT_OK(range_offsets->finish(&range_offsets_arr));
                TURBO_RETURN_NOT_OK(range_lengths->finish(&range_lengths_arr));
                return StructArray::create(
                        {range_starts_arr, range_offsets_arr, range_lengths_arr},
                        {field("start", uint64()), field("offset", uint64()), field("length", uint64())});
            }

            // Entry point: visit `input` over its own offset/length window and
            // return the collected ranges as a StructArray.
            static turbo::Result<std::shared_ptr<Array>> Exec(const ArrayData &input) {
                UInt64Builder range_starts, range_offsets, range_lengths;
                GetByteRangesArray self{input, input.offset, input.length,
                                        &range_starts, &range_offsets, &range_lengths};
                TURBO_RETURN_NOT_OK(visit_type_inline(*input.type, &self));
                return self.MakeRanges();
            }
        };

        int64_t RangesToLengthSum(const Array &ranges) {
            int64_t sum = 0;
            const StructArray &ranges_struct = turbo::checked_cast<const StructArray &>(ranges);
            std::shared_ptr<UInt64Array> lengths =
                    turbo::checked_pointer_cast<UInt64Array>(ranges_struct.field(2));
            for (auto length: *lengths) {
                sum += static_cast<int64_t>(*length);
            }
            return sum;
        }

    }  // namespace

    // Build a StructArray of (start, offset, length) byte ranges that
    // `array_data`'s logical window actually references within its buffers.
    turbo::Result<std::shared_ptr<Array>> ReferencedRanges(const ArrayData &array_data) {
        return GetByteRangesArray::Exec(array_data);
    }

    // Number of buffer bytes actually referenced by `array_data`'s logical window
    // (sum of the lengths of its referenced ranges, without deduplication).
    turbo::Result<int64_t> ReferencedBufferSize(const ArrayData &array_data) {
        TURBO_MOVE_OR_RAISE(const std::shared_ptr<Array> byte_ranges,
                            GetByteRangesArray::Exec(array_data));
        return RangesToLengthSum(*byte_ranges);
    }

    // Number of buffer bytes actually referenced by `array` (sum of the lengths
    // of its referenced ranges, without deduplication).
    turbo::Result<int64_t> ReferencedBufferSize(const Array &array) {
        TURBO_MOVE_OR_RAISE(const std::shared_ptr<Array> byte_ranges,
                            GetByteRangesArray::Exec(*array.data()));
        return RangesToLengthSum(*byte_ranges);
    }

    // Referenced byte count summed over every chunk of `array`.
    turbo::Result<int64_t> ReferencedBufferSize(const ChunkedArray &array) {
        int64_t total = 0;
        for (const auto &piece: array.chunks()) {
            TURBO_MOVE_OR_RAISE(int64_t piece_size, ReferencedBufferSize(*piece));
            total += piece_size;
        }
        return total;
    }

    // Referenced byte count summed over every column of `record_batch`.
    turbo::Result<int64_t> ReferencedBufferSize(const RecordBatch &record_batch) {
        int64_t total = 0;
        for (const auto &col: record_batch.columns()) {
            TURBO_MOVE_OR_RAISE(int64_t col_size, ReferencedBufferSize(*col));
            total += col_size;
        }
        return total;
    }

    // Referenced byte count summed over every column of `table`.
    //
    // Delegates to the ChunkedArray overload per column instead of duplicating the
    // per-chunk loop, keeping the chunk-summation logic in a single place (and
    // consistent with the RecordBatch overload's per-column delegation).
    turbo::Result<int64_t> ReferencedBufferSize(const Table &table) {
        int64_t sum = 0;
        for (const auto &column: table.columns()) {
            TURBO_MOVE_OR_RAISE(int64_t column_sum, ReferencedBufferSize(*column));
            sum += column_sum;
        }
        return sum;
    }


}  // namespace nebula::util
