// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/json/chunked_builder.h>

#include <cstdint>
#include <cstring>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include <nebula/core/array.h>
#include <nebula/core/buffer.h>
#include <nebula/core/table.h>
#include <nebula/future/task_group.h>
#include <nebula/json/converter.h>
#include <turbo/base/checked_cast.h>
#include <turbo/log/logging.h>

using nebula::internal::TaskGroup;
namespace nebula::json {

    namespace {

        // Forward declaration (defined at the bottom of this anonymous namespace):
        // recursively builds a ChunkedArrayBuilder tree matching `type`. See the
        // definition for the promotion/inference semantics of `allow_promotion`.
        turbo::Status MakeChunkedArrayBuilder(const std::shared_ptr<TaskGroup> &task_group,
                                       MemoryPool *pool, const PromotionGraph *promotion_graph,
                                       const std::shared_ptr<DataType> &type,
                                       bool allow_promotion,
                                       std::shared_ptr<ChunkedArrayBuilder> *out);

        // Base class for builders of non-nested (leaf) columns: accumulates one
        // converted chunk per block index and assembles them into a ChunkedArray.
        // Derived classes guard chunks_ with mutex_ when inserting concurrently.
        class NonNestedChunkedArrayBuilder : public ChunkedArrayBuilder {
        public:
            NonNestedChunkedArrayBuilder(const std::shared_ptr<TaskGroup> &task_group,
                                         std::shared_ptr<Converter> converter)
                    : ChunkedArrayBuilder(task_group), converter_(std::move(converter)) {}

            // Waits for all pending conversion tasks, then moves the accumulated
            // chunks into *out, typed by the converter's output type.
            turbo::Status finish(std::shared_ptr<ChunkedArray> *out) override {
                TURBO_RETURN_NOT_OK(task_group_->finish());
                *out = std::make_shared<ChunkedArray>(std::move(chunks_), converter_->out_type());
                chunks_.clear();  // leave the builder in a well-defined empty state
                return turbo::OkStatus();
            }

            // Drains outstanding tasks before adopting the new task group, so no
            // task scheduled on the old group can race with the swap.
            turbo::Status ReplaceTaskGroup(const std::shared_ptr<TaskGroup> &task_group) override {
                TURBO_RETURN_NOT_OK(task_group_->finish());
                task_group_ = task_group;
                return turbo::OkStatus();
            }

        protected:
            ArrayVector chunks_;  // converted chunk per block index (may hold gaps until tasks run)
            std::mutex mutex_;    // guards chunks_ (and derived-class state) during insert
            std::shared_ptr<Converter> converter_;
        };

        // Builder for a column whose output type is fixed up front: each block is
        // converted independently on the task group with the shared converter.
        class TypedChunkedArrayBuilder
                : public NonNestedChunkedArrayBuilder,
                  public std::enable_shared_from_this<TypedChunkedArrayBuilder> {
        public:
            using NonNestedChunkedArrayBuilder::NonNestedChunkedArrayBuilder;

            // Reserve slot `block_index`, then queue an asynchronous conversion of
            // `unconverted` into it. The field argument is ignored: the type never
            // changes, so no inference input is needed.
            void insert(int64_t block_index, const std::shared_ptr<Field> &,
                        const std::shared_ptr<Array> &unconverted) override {
                const auto slot = static_cast<size_t>(block_index);
                {
                    std::lock_guard<std::mutex> guard(mutex_);
                    if (chunks_.size() <= slot) {
                        chunks_.resize(slot + 1, nullptr);
                    }
                }

                // Keep this builder alive for as long as the task may run.
                auto self = shared_from_this();
                task_group_->append([self, block_index, unconverted] {
                    std::shared_ptr<Array> converted;
                    TURBO_RETURN_NOT_OK(self->converter_->Convert(unconverted, &converted));
                    std::lock_guard<std::mutex> guard(self->mutex_);
                    self->chunks_[block_index] = std::move(converted);
                    return turbo::OkStatus();
                });
            }
        };

        // Builder that infers the column's type as blocks arrive: when a chunk
        // fails to convert, the output type is promoted via the promotion graph
        // and all previously-converted chunks are rescheduled for reconversion.
        // Raw (unconverted) chunks are retained until finish() for that purpose.
        class InferringChunkedArrayBuilder
                : public NonNestedChunkedArrayBuilder,
                  public std::enable_shared_from_this<InferringChunkedArrayBuilder> {
        public:
            InferringChunkedArrayBuilder(const std::shared_ptr<TaskGroup> &task_group,
                                         const PromotionGraph *promotion_graph,
                                         std::shared_ptr<Converter> converter)
                    : NonNestedChunkedArrayBuilder(task_group, std::move(converter)),
                      promotion_graph_(promotion_graph) {}

            // Store the raw chunk and its field (needed if a later promotion forces
            // reconversion), then queue an asynchronous conversion attempt.
            void insert(int64_t block_index, const std::shared_ptr<Field> &unconverted_field,
                        const std::shared_ptr<Array> &unconverted) override {
                std::unique_lock<std::mutex> lock(mutex_);
                if (chunks_.size() <= static_cast<size_t>(block_index)) {
                    chunks_.resize(static_cast<size_t>(block_index) + 1, nullptr);
                    unconverted_.resize(chunks_.size(), nullptr);
                    unconverted_fields_.resize(chunks_.size(), nullptr);
                }
                unconverted_[block_index] = unconverted;
                unconverted_fields_[block_index] = unconverted_field;
                lock.unlock();
                ScheduleConvertChunk(block_index);
            }

            // Queue an asynchronous (re)conversion of one block on the task group.
            void ScheduleConvertChunk(int64_t block_index) {
                auto self = shared_from_this();
                task_group_->append([self, block_index] {
                    return self->TryConvertChunk(static_cast<size_t>(block_index));
                });
            }

            // Convert one block with the current converter; on failure, promote the
            // output type (if possible) and reschedule every affected chunk.
            turbo::Status TryConvertChunk(size_t block_index) {
                std::unique_lock<std::mutex> lock(mutex_);
                // Snapshot state so the (potentially slow) conversion runs unlocked.
                auto converter = converter_;
                auto unconverted = unconverted_[block_index];
                auto unconverted_field = unconverted_fields_[block_index];
                std::shared_ptr<Array> converted;

                lock.unlock();
                turbo::Status st = converter->Convert(unconverted, &converted);
                lock.lock();

                if (converter != converter_) {
                    // another task promoted converter; reconvert
                    lock.unlock();
                    ScheduleConvertChunk(block_index);
                    return turbo::OkStatus();
                }

                if (st.ok()) {
                    // conversion succeeded
                    chunks_[block_index] = std::move(converted);
                    return turbo::OkStatus();
                }

                auto promoted_type =
                        promotion_graph_->Promote(converter_->out_type(), unconverted_field);
                if (promoted_type == nullptr) {
                    // converter failed, no promotion available
                    return st;
                }
                // Swap in a converter for the promoted type; concurrent tasks will
                // observe the change through the converter != converter_ check above.
                TURBO_RETURN_NOT_OK(make_converter(promoted_type, converter_->pool(), &converter_));

                size_t nchunks = chunks_.size();
                for (size_t i = 0; i < nchunks; ++i) {
                    if (i != block_index && chunks_[i]) {
                        // We're assuming the chunk was converted using the wrong type
                        // (which should be true unless the executor reorders tasks)
                        chunks_[i].reset();
                        lock.unlock();
                        ScheduleConvertChunk(i);
                        lock.lock();
                    }
                }
                lock.unlock();
                ScheduleConvertChunk(block_index);
                return turbo::OkStatus();
            }

            turbo::Status finish(std::shared_ptr<ChunkedArray> *out) override {
                TURBO_RETURN_NOT_OK(NonNestedChunkedArrayBuilder::finish(out));
                // Release the retained raw chunks AND their fields. Previously only
                // unconverted_ was cleared, leaving unconverted_fields_ holding
                // Field references past finish() for no reason.
                unconverted_.clear();
                unconverted_fields_.clear();
                return turbo::OkStatus();
            }

        private:
            ArrayVector unconverted_;                          // raw chunks, kept for reconversion
            std::vector<std::shared_ptr<Field>> unconverted_fields_;  // fields driving promotion
            const PromotionGraph *promotion_graph_;
        };

        // Builder for a list column: keeps the per-block null bitmaps and offset
        // buffers here, and delegates the flattened values to a child builder.
        class ChunkedListArrayBuilder : public ChunkedArrayBuilder {
        public:
            ChunkedListArrayBuilder(const std::shared_ptr<TaskGroup> &task_group, MemoryPool *pool,
                                    std::shared_ptr<ChunkedArrayBuilder> value_builder,
                                    const std::shared_ptr<Field> &value_field)
                    : ChunkedArrayBuilder(task_group),
                      pool_(pool),
                      value_builder_(std::move(value_builder)),
                      value_field_(value_field) {}

            // Drain our own tasks, then the value child's, before swapping groups.
            turbo::Status ReplaceTaskGroup(const std::shared_ptr<TaskGroup> &task_group) override {
                TURBO_RETURN_NOT_OK(task_group_->finish());
                TURBO_RETURN_NOT_OK(value_builder_->ReplaceTaskGroup(task_group));
                task_group_ = task_group;
                return turbo::OkStatus();
            }

            // Record the block's bitmap and offsets, forwarding the flattened
            // values to the child builder. An all-null block synthesizes empty
            // lists via InsertNull.
            void insert(int64_t block_index, const std::shared_ptr<Field> &,
                        const std::shared_ptr<Array> &unconverted) override {
                std::lock_guard<std::mutex> guard(mutex_);

                const auto slot = static_cast<size_t>(block_index);
                if (null_bitmap_chunks_.size() <= slot) {
                    null_bitmap_chunks_.resize(slot + 1, nullptr);
                    offset_chunks_.resize(null_bitmap_chunks_.size(), nullptr);
                }

                if (unconverted->type_id() == Type::NA) {
                    auto st = InsertNull(block_index, unconverted->length());
                    if (!st.ok()) {
                        // Surface the allocation failure through the task group.
                        task_group_->append([st] { return st; });
                    }
                    return;
                }

                DKCHECK_EQ(unconverted->type_id(), Type::LIST);
                const auto &list_array = turbo::checked_cast<const ListArray &>(*unconverted);

                null_bitmap_chunks_[slot] = unconverted->null_bitmap();
                offset_chunks_[slot] = list_array.value_offsets();

                value_builder_->insert(block_index, list_array.list_type()->value_field(),
                                       list_array.values());
            }

            // Finalize the value child, then zip each block's offsets/bitmap with
            // its value chunk into a ListArray.
            turbo::Status finish(std::shared_ptr<ChunkedArray> *out) override {
                TURBO_RETURN_NOT_OK(task_group_->finish());

                std::shared_ptr<ChunkedArray> value_array;
                TURBO_RETURN_NOT_OK(value_builder_->finish(&value_array));

                auto type = list(value_field_->with_type(value_array->type())->with_metadata(nullptr));
                const size_t num_chunks = null_bitmap_chunks_.size();
                ArrayVector chunks(num_chunks);
                for (size_t i = 0; i < num_chunks; ++i) {
                    // The offsets buffer holds length + 1 int32 entries.
                    auto length = offset_chunks_[i]->size() / sizeof(int32_t) - 1;
                    chunks[i] = std::make_shared<ListArray>(type, length, offset_chunks_[i],
                                                            value_array->chunk(static_cast<int>(i)),
                                                            null_bitmap_chunks_[i]);
                }

                *out = std::make_shared<ChunkedArray>(std::move(chunks), type);
                return turbo::OkStatus();
            }

        private:
            // Precondition: called from insert() only, with mutex_ held.
            // Gives the child a zero-length chunk and writes all-zero offsets
            // (every list empty) plus an all-null bitmap for this block.
            turbo::Status InsertNull(int64_t block_index, int64_t length) {
                value_builder_->insert(block_index, value_field_, std::make_shared<NullArray>(0));

                TURBO_MOVE_OR_RAISE(null_bitmap_chunks_[block_index],
                                       allocate_empty_bitmap(length, pool_));

                const int64_t offsets_length = (length + 1) * sizeof(int32_t);
                TURBO_MOVE_OR_RAISE(offset_chunks_[block_index],
                                       allocate_buffer(offsets_length, pool_));
                std::memset(offset_chunks_[block_index]->mutable_data(), 0, offsets_length);

                return turbo::OkStatus();
            }

            std::mutex mutex_;
            MemoryPool *pool_;
            std::shared_ptr<ChunkedArrayBuilder> value_builder_;
            BufferVector offset_chunks_, null_bitmap_chunks_;
            std::shared_ptr<Field> value_field_;
        };

        // Builds a struct column from per-field child builders. When a promotion
        // graph is supplied, previously-unseen child fields may be added on the
        // fly and fields absent from some blocks are back-filled with nulls at
        // finish(); without one, the schema-declared field order is fixed.
        class ChunkedStructArrayBuilder : public ChunkedArrayBuilder {
        public:
            ChunkedStructArrayBuilder(
                    const std::shared_ptr<TaskGroup> &task_group, MemoryPool *pool,
                    const PromotionGraph *promotion_graph,
                    std::vector<std::pair<std::string, std::shared_ptr<ChunkedArrayBuilder>>>
                    name_builders)
                    : ChunkedArrayBuilder(task_group), pool_(pool), promotion_graph_(promotion_graph) {
                for (auto &&name_builder: name_builders) {
                    auto index = static_cast<int>(name_to_index_.size());
                    name_to_index_.emplace(std::move(name_builder.first), index);
                    child_builders_.emplace_back(std::move(name_builder.second));
                }
            }

            // Records the block's null bitmap and length, then forwards each child
            // array to its builder: positionally when there is no promotion graph,
            // associatively by name otherwise. A whole-null block only records an
            // all-null bitmap; its children are back-filled during finish().
            void insert(int64_t block_index, const std::shared_ptr<Field> &,
                        const std::shared_ptr<Array> &unconverted) override {
                std::unique_lock<std::mutex> lock(mutex_);

                if (null_bitmap_chunks_.size() <= static_cast<size_t>(block_index)) {
                    null_bitmap_chunks_.resize(static_cast<size_t>(block_index) + 1, nullptr);
                    chunk_lengths_.resize(null_bitmap_chunks_.size(), -1);
                    child_absent_.resize(null_bitmap_chunks_.size(), std::vector<bool>(0));
                }
                null_bitmap_chunks_[block_index] = unconverted->null_bitmap();
                chunk_lengths_[block_index] = unconverted->length();

                if (unconverted->type_id() == Type::NA) {
                    // Replace the (absent) bitmap with an explicit all-zero one so
                    // every slot in this block reads as null.
                    auto maybe_buffer = allocate_bitmap(unconverted->length(), pool_);
                    if (maybe_buffer.ok()) {
                        null_bitmap_chunks_[block_index] = *std::move(maybe_buffer);
                        std::memset(null_bitmap_chunks_[block_index]->mutable_data(), 0,
                                    null_bitmap_chunks_[block_index]->size());
                    } else {
                        // Surface the allocation failure through the task group.
                        turbo::Status st = maybe_buffer.status();
                        task_group_->append([st] { return st; });
                    }

                    // absent fields will be inserted at finish
                    return;
                }

                const auto &struct_array = turbo::checked_cast<const StructArray &>(*unconverted);
                if (promotion_graph_ == nullptr) {
                    // If unexpected fields are ignored or result in an error then all parsers will emit
                    // columns exclusively in the ordering specified in ParseOptions::explicit_schema,
                    // so child_builders_ is immutable and no associative lookup is necessary.
                    for (int i = 0; i < unconverted->num_fields(); ++i) {
                        child_builders_[i]->insert(block_index, unconverted->type()->field(i),
                                                   struct_array.field(i));
                    }
                } else {
                    auto st = InsertChildren(block_index, struct_array);
                    if (!st.ok()) {
                        return task_group_->append([st] { return st; });
                    }
                }
            }

            // Finalizes every child (back-filling null chunks for absent fields
            // when promotion is enabled), then assembles one StructArray per block.
            turbo::Status finish(std::shared_ptr<ChunkedArray> *out) override {
                TURBO_RETURN_NOT_OK(task_group_->finish());

                if (promotion_graph_ != nullptr) {
                    // insert absent child chunks
                    for (auto &&name_index: name_to_index_) {
                        auto child_builder = child_builders_[name_index.second].get();

                        // Serial group: these inserts happen synchronously, here.
                        TURBO_RETURN_NOT_OK(child_builder->ReplaceTaskGroup(TaskGroup::MakeSerial()));

                        for (size_t i = 0; i < chunk_lengths_.size(); ++i) {
                            // Skip blocks where this child was actually present.
                            if (child_absent_[i].size() > static_cast<size_t>(name_index.second) &&
                                !child_absent_[i][name_index.second]) {
                                continue;
                            }
                            auto empty = std::make_shared<NullArray>(chunk_lengths_[i]);
                            child_builder->insert(i, promotion_graph_->Null(name_index.first), empty);
                        }
                    }
                }

                std::vector<std::shared_ptr<Field>> fields(name_to_index_.size());
                std::vector<std::shared_ptr<ChunkedArray>> child_arrays(name_to_index_.size());
                for (auto &&name_index: name_to_index_) {
                    auto child_builder = child_builders_[name_index.second].get();

                    std::shared_ptr<ChunkedArray> child_array;
                    TURBO_RETURN_NOT_OK(child_builder->finish(&child_array));

                    child_arrays[name_index.second] = child_array;
                    fields[name_index.second] = field(name_index.first, child_array->type());
                }

                auto type = STRUCT(std::move(fields));
                ArrayVector chunks(null_bitmap_chunks_.size());
                for (size_t i = 0; i < null_bitmap_chunks_.size(); ++i) {
                    // NOTE(review): assumes every child array has one chunk per
                    // block index — holds as long as all inserts go through insert().
                    ArrayVector child_chunks;
                    for (const auto &child_array: child_arrays) {
                        child_chunks.push_back(child_array->chunk(static_cast<int>(i)));
                    }
                    chunks[i] = std::make_shared<StructArray>(type, chunk_lengths_[i], child_chunks,
                                                              null_bitmap_chunks_[i]);
                }

                *out = std::make_shared<ChunkedArray>(std::move(chunks), type);
                return turbo::OkStatus();
            }

            // Drains our tasks and each child's before adopting the new group.
            turbo::Status ReplaceTaskGroup(const std::shared_ptr<TaskGroup> &task_group) override {
                TURBO_RETURN_NOT_OK(task_group_->finish());
                for (auto &&child_builder: child_builders_) {
                    TURBO_RETURN_NOT_OK(child_builder->ReplaceTaskGroup(task_group));
                }
                task_group_ = task_group;
                return turbo::OkStatus();
            }

        private:
            // insert children associatively by name; the unconverted block may have unexpected or
            // differently ordered fields
            // call from insert() only, with mutex_ locked
            turbo::Status InsertChildren(int64_t block_index, const StructArray &unconverted) {
                const auto &fields = unconverted.type()->fields();

                for (int i = 0; i < unconverted.num_fields(); ++i) {
                    auto it = name_to_index_.find(fields[i]->name());

                    if (it == name_to_index_.end()) {
                        // add a new field to this builder
                        auto type = promotion_graph_->Infer(fields[i]);
                                DKCHECK_NE(type, nullptr)
                                << "invalid unconverted_field encountered in conversion: "
                                << fields[i]->name() << ":" << *fields[i]->type();

                        auto new_index = static_cast<int>(name_to_index_.size());
                        it = name_to_index_.emplace(fields[i]->name(), new_index).first;

                        std::shared_ptr<ChunkedArrayBuilder> child_builder;
                        TURBO_RETURN_NOT_OK(MakeChunkedArrayBuilder(task_group_, pool_, promotion_graph_, type,
                                /*allow_promotion=*/true, &child_builder));
                        child_builders_.emplace_back(std::move(child_builder));
                    }

                    auto unconverted_field = unconverted.type()->field(i);
                    child_builders_[it->second]->insert(block_index, unconverted_field,
                                                        unconverted.field(i));

                    // Mark this child present for this block (new children default
                    // to absent=true until proven otherwise).
                    child_absent_[block_index].resize(child_builders_.size(), true);
                    child_absent_[block_index][it->second] = false;
                }

                return turbo::OkStatus();
            }

            std::mutex mutex_;
            MemoryPool *pool_;
            const PromotionGraph *promotion_graph_;
            std::unordered_map<std::string, int> name_to_index_;  // field name -> child index
            std::vector<std::shared_ptr<ChunkedArrayBuilder>> child_builders_;
            std::vector<std::vector<bool>> child_absent_;  // per block: was child i missing?
            BufferVector null_bitmap_chunks_;
            std::vector<int64_t> chunk_lengths_;
        };

        // If a promotion graph is provided, unexpected fields will be allowed - using the graph
        // recursively for itself and any child fields (via the `allow_promotion` parameter).
        // Fields provided in the schema will adhere to their corresponding type. However,
        // structs defined in the schema may obtain unexpected child fields, which will use the
        // promotion graph as well.
        //
        // If a promotion graph is not provided, unexpected fields are always ignored and
        // type inference never occurs.
        turbo::Status MakeChunkedArrayBuilder(const std::shared_ptr<TaskGroup> &task_group,
                                       MemoryPool *pool, const PromotionGraph *promotion_graph,
                                       const std::shared_ptr<DataType> &type,
                                       bool allow_promotion,
                                       std::shared_ptr<ChunkedArrayBuilder> *out) {
            switch (type->id()) {
                case Type::STRUCT: {
                    // Recurse into each declared child field.
                    std::vector<std::pair<std::string, std::shared_ptr<ChunkedArrayBuilder>>>
                            child_builders;
                    for (const auto &f: type->fields()) {
                        std::shared_ptr<ChunkedArrayBuilder> child;
                        TURBO_RETURN_NOT_OK(MakeChunkedArrayBuilder(task_group, pool, promotion_graph,
                                                              f->type(), allow_promotion, &child));
                        child_builders.emplace_back(f->name(), std::move(child));
                    }
                    *out = std::make_shared<ChunkedStructArrayBuilder>(task_group, pool, promotion_graph,
                                                                       std::move(child_builders));
                    return turbo::OkStatus();
                }
                case Type::LIST: {
                    // Recurse into the value type of the list.
                    const auto &list_type = turbo::checked_cast<const ListType &>(*type);
                    std::shared_ptr<ChunkedArrayBuilder> value_builder;
                    TURBO_RETURN_NOT_OK(MakeChunkedArrayBuilder(task_group, pool, promotion_graph,
                                                          list_type.get_value_type(), allow_promotion,
                                                          &value_builder));
                    *out = std::make_shared<ChunkedListArrayBuilder>(
                            task_group, pool, std::move(value_builder), list_type.value_field());
                    return turbo::OkStatus();
                }
                default:
                    break;
            }

            // Leaf column: a plain converter, wrapped in an inferring builder only
            // when promotion is both allowed and possible.
            std::shared_ptr<Converter> converter;
            TURBO_RETURN_NOT_OK(make_converter(type, pool, &converter));
            if (allow_promotion && promotion_graph) {
                *out = std::make_shared<InferringChunkedArrayBuilder>(task_group, promotion_graph,
                                                                      std::move(converter));
            } else {
                *out = std::make_shared<TypedChunkedArrayBuilder>(task_group, std::move(converter));
            }
            return turbo::OkStatus();
        }

    }  // namespace

    // This overload is exposed to the user and will only be called once on instantiation to
    // canonicalize any explicitly-defined fields. Such fields won't be subject to
    // type inference/promotion
    turbo::Status MakeChunkedArrayBuilder(const std::shared_ptr<TaskGroup> &task_group,
                                   MemoryPool *pool, const PromotionGraph *promotion_graph,
                                   const std::shared_ptr<DataType> &type,
                                   std::shared_ptr<ChunkedArrayBuilder> *out) {
        // Explicitly-declared fields keep their declared type, so delegate to the
        // internal overload with promotion disabled at this level.
        constexpr bool kAllowPromotion = false;
        return MakeChunkedArrayBuilder(task_group, pool, promotion_graph, type, kAllowPromotion,
                                       out);
    }
    
}  // namespace nebula::json
