// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include <nebula/core/array.h>
#include <nebula/array/builder_base.h>
#include <nebula/core/chunked_array.h>
#include <nebula/csv/column_builder.h>
#include <nebula/csv/converter.h>
#include <nebula/csv/inference_internal.h>
#include <nebula/csv/options.h>
#include <nebula/csv/parser.h>
#include <turbo/utility/status.h>
#include <nebula/types/type.h>
#include <nebula/types/type_fwd.h>
#include <turbo/log/logging.h>
#include <nebula/future/task_group.h>

namespace nebula {

    // Pull TaskGroup out of the internal namespace so the csv code below
    // can refer to it unqualified.
    using internal::TaskGroup;
}

namespace nebula::csv {

    class BlockParser;

    // Shared base for the concrete column builders below.  It owns the list of
    // converted chunks (one Array per parsed block) and the mutex protecting it.
    // Subclasses implement insert() and report their output type via type().
    class ConcreteColumnBuilder : public ColumnBuilder {
    public:
        explicit ConcreteColumnBuilder(MemoryPool *pool, std::shared_ptr<TaskGroup> task_group,
                                       int32_t col_index = -1)
                : ColumnBuilder(std::move(task_group)), pool_(pool), col_index_(col_index) {}

        // Append a new block at the end of the current chunk list.
        void append(const std::shared_ptr<BlockParser> &parser) override {
            insert(static_cast<int64_t>(chunks_.size()), parser);
        }

        // Assemble all converted chunks into a single ChunkedArray.
        turbo::Result<std::shared_ptr<ChunkedArray>> finish() override {
            std::lock_guard<std::mutex> guard(mutex_);
            return FinishUnlocked();
        }

    protected:
        // The output type of this column; subclasses decide (fixed or inferred).
        virtual std::shared_ptr<DataType> type() const = 0;

        // Precondition: mutex_ held.  Fails if any chunk slot was never filled.
        turbo::Result<std::shared_ptr<ChunkedArray>> FinishUnlocked() {
            auto result_type = this->type();
            for (const auto &chunk: chunks_) {
                if (!chunk) {
                    return turbo::unknown_error("a chunk failed converting for an unknown reason");
                }
                DKCHECK_EQ(chunk->type()->id(), result_type->id()) << "Chunk types not equal!";
            }
            return std::make_shared<ChunkedArray>(chunks_, std::move(result_type));
        }

        // Ensure a (null) slot exists for block_index, taking the lock.
        void ReserveChunks(int64_t block_index) {
            std::lock_guard<std::mutex> guard(mutex_);
            ReserveChunksUnlocked(block_index);
        }

        // Precondition: mutex_ held.  Grows the chunk list with null
        // placeholders so that chunks_[block_index] is addressable.
        void ReserveChunksUnlocked(int64_t block_index) {
            const auto needed = static_cast<size_t>(block_index) + 1;
            if (chunks_.size() < needed) {
                chunks_.resize(needed);
            }
        }

        // Store a conversion result into its slot, taking the lock.
        turbo::Status SetChunk(int64_t chunk_index, turbo::Result<std::shared_ptr<Array>> maybe_array) {
            std::lock_guard<std::mutex> guard(mutex_);
            return SetChunkUnlocked(chunk_index, std::move(maybe_array));
        }

        // Precondition: mutex_ held.  On error, annotates the status with the
        // column index before returning it.
        turbo::Status SetChunkUnlocked(int64_t chunk_index,
                                turbo::Result<std::shared_ptr<Array>> maybe_array) {
            // Should not insert an already built chunk
            DKCHECK_EQ(chunks_[chunk_index], nullptr);

            if (!maybe_array.ok()) {
                return WrapConversionError(maybe_array.status());
            }
            chunks_[chunk_index] = *std::move(maybe_array);
            return turbo::OkStatus();
        }

        // Prefix a failed status with the column number for better diagnostics.
        turbo::Status WrapConversionError(const turbo::Status &st) {
            if (TURBO_LIKELY(st.ok())) {
                return st;
            }
            std::stringstream ss;
            ss << "In CSV column #" << col_index_ << ": " << st.message();
            return st.with_message(ss.str());
        }

        MemoryPool *pool_;
        int32_t col_index_;

        // One entry per parsed block; null until that block's conversion lands.
        ArrayVector chunks_;

        // Guards chunks_ (and, in subclasses, any additional shared state).
        std::mutex mutex_;
    };

//////////////////////////////////////////////////////////////////////////
// Null column builder implementation (for a column not in the CSV file)

    // Produces an all-null column of a fixed type, for a column that is part of
    // the requested schema but not present in the CSV file.
    class NullColumnBuilder : public ConcreteColumnBuilder {
    public:
        explicit NullColumnBuilder(const std::shared_ptr<DataType> &type, MemoryPool *pool,
                                   const std::shared_ptr<TaskGroup> &task_group)
                : ConcreteColumnBuilder(pool, task_group), type_(type) {}

        void insert(int64_t block_index, const std::shared_ptr<BlockParser> &parser) override;

    protected:
        // The output type is fixed at construction time.
        std::shared_ptr<DataType> type() const override { return type_; }

        std::shared_ptr<DataType> type_;
    };

    // Reserve a chunk slot for this block and schedule a task that fills it
    // with an all-null array of type_, sized to the block's row count.
    void NullColumnBuilder::insert(int64_t block_index,
                                   const std::shared_ptr<BlockParser> &parser) {
        ReserveChunks(block_index);

        // Spawn a task that will build an array of nulls with the right DataType
        const int32_t num_rows = parser->num_rows();
        DKCHECK_GE(num_rows, 0);

        task_group_->append([this, block_index, num_rows]() -> turbo::Status {
            std::unique_ptr<ArrayBuilder> builder;
            TURBO_RETURN_NOT_OK(MakeBuilder(pool_, type_, &builder));
            std::shared_ptr<Array> res;
            TURBO_RETURN_NOT_OK(builder->append_nulls(num_rows));
            TURBO_RETURN_NOT_OK(builder->finish(&res));

            // Move `res` into the Result: avoids a needless shared_ptr copy
            // (atomic refcount increment) since `res` is dead after this call.
            return SetChunk(block_index, std::move(res));
        });
    }

//////////////////////////////////////////////////////////////////////////
// Pre-typed column builder implementation

    // Converts a CSV column whose type is known up front; a single Converter is
    // created in init() and reused for every block.
    class TypedColumnBuilder : public ConcreteColumnBuilder {
    public:
        TypedColumnBuilder(const std::shared_ptr<DataType> &type, int32_t col_index,
                           const ConvertOptions &options, MemoryPool *pool,
                           const std::shared_ptr<TaskGroup> &task_group)
                : ConcreteColumnBuilder(pool, task_group, col_index),
                  type_(type),
                  options_(options) {}

        // Must be called (and succeed) before insert(); builds converter_.
        turbo::Status init();

        void insert(int64_t block_index, const std::shared_ptr<BlockParser> &parser) override;

    protected:
        std::shared_ptr<DataType> type() const override { return type_; }

        std::shared_ptr<DataType> type_;
        // CAUTION: ConvertOptions can grow large (if it customizes hundreds or
        // thousands of columns), so avoid copying it in each TypedColumnBuilder.
        const ConvertOptions &options_;

        std::shared_ptr<Converter> converter_;
    };

    turbo::Status TypedColumnBuilder::init() {
        // Build the converter for the fixed column type once; insert() asserts
        // it is non-null before use.
        TURBO_MOVE_OR_RAISE(converter_, Converter::create(type_, options_, pool_));
        return turbo::OkStatus();
    }

    // Reserve a slot for this block and schedule its conversion on the task group.
    void TypedColumnBuilder::insert(int64_t block_index,
                                    const std::shared_ptr<BlockParser> &parser) {
        DKCHECK_NE(converter_, nullptr);

        ReserveChunks(block_index);

        // Everything the closure references must outlive the append() call;
        // `parser` is captured by value (shared_ptr copy) for that reason.
        auto convert_task = [this, parser, block_index]() -> turbo::Status {
            return SetChunk(block_index, converter_->Convert(*parser, col_index_));
        };
        task_group_->append(std::move(convert_task));
    }

//////////////////////////////////////////////////////////////////////////
// Type-inferring column builder implementation

    // Converts a CSV column whose type is not known up front.  Starts from the
    // initial inference state and, when a chunk fails to convert, loosens the
    // inferred type and reschedules already-converted chunks (see
    // TryConvertChunk).  Parsers are retained in parsers_ so chunks can be
    // reconverted after a type change.
    class InferringColumnBuilder : public ConcreteColumnBuilder {
    public:
        InferringColumnBuilder(int32_t col_index, const ConvertOptions &options,
                               MemoryPool *pool, const std::shared_ptr<TaskGroup> &task_group)
                : ConcreteColumnBuilder(pool, task_group, col_index),
                  options_(options),
                  infer_status_(options) {}

        // Must be called (and succeed) before insert(); builds the initial converter.
        turbo::Status init();

        void insert(int64_t block_index, const std::shared_ptr<BlockParser> &parser) override;

        turbo::Result<std::shared_ptr<ChunkedArray>> finish() override;

    protected:
        // The currently-inferred type, as reported by the current converter.
        std::shared_ptr<DataType> type() const override {
                    DKCHECK_NE(converter_, nullptr);
            return converter_->type();
        }

        // Rebuild converter_ from the current inference state.
        turbo::Status UpdateType();

        // Attempt to convert one chunk with the current converter; may loosen
        // the type and reschedule other chunks on failure.
        turbo::Status TryConvertChunk(int64_t chunk_index);

        // This must be called unlocked!
        void ScheduleConvertChunk(int64_t chunk_index);

        // CAUTION: ConvertOptions can grow large (if it customizes hundreds or
        // thousands of columns), so avoid copying it in each InferringColumnBuilder.
        const ConvertOptions &options_;

        // Current inference status
        InferStatus infer_status_;
        std::shared_ptr<Converter> converter_;

        // The parsers corresponding to each chunk (for reconverting)
        std::vector<std::shared_ptr<BlockParser>> parsers_;
    };

    turbo::Status InferringColumnBuilder::init() { return UpdateType(); }

    turbo::Status InferringColumnBuilder::UpdateType() {
        // Rebuild the converter to match the currently inferred type.
        return infer_status_.make_converter(pool_).try_value(&converter_);
    }

    // Queue an asynchronous conversion attempt for one chunk.
    // Per the declaration's contract, callers must NOT hold mutex_ here.
    void InferringColumnBuilder::ScheduleConvertChunk(int64_t chunk_index) {
        auto convert_task = [this, chunk_index]() { return TryConvertChunk(chunk_index); };
        task_group_->append(std::move(convert_task));
    }

    // Try to convert one chunk with the current converter.
    //
    // Protocol: snapshot the converter, parser and inference generation under
    // mutex_, run the conversion with the lock released, then re-acquire the
    // lock to decide what to do with the result:
    //   - if another task changed the inferred type meanwhile, discard the
    //     (stale) result and reschedule;
    //   - on success, or on failure with no looser type available, commit the
    //     result (success or error) to the chunk slot;
    //   - otherwise loosen the type, rebuild the converter, and reschedule
    //     every already-converted chunk plus this one.
    turbo::Status InferringColumnBuilder::TryConvertChunk(int64_t chunk_index) {
        std::unique_lock<std::mutex> lock(mutex_);
        std::shared_ptr<Converter> converter = converter_;
        std::shared_ptr<BlockParser> parser = parsers_[chunk_index];
        InferKind kind = infer_status_.kind();

        DKCHECK_NE(parser, nullptr);

        lock.unlock();
        auto maybe_array = converter->Convert(*parser, col_index_);
        lock.lock();

        if (kind != infer_status_.kind()) {
            // infer_kind_ was changed by another task, reconvert
            lock.unlock();
            ScheduleConvertChunk(chunk_index);
            return turbo::OkStatus();
        }

        if (maybe_array.ok() || !infer_status_.can_loosen_type()) {
            // Conversion succeeded, or failed definitively
            if (!infer_status_.can_loosen_type()) {
                // We won't try to reconvert anymore
                parsers_[chunk_index].reset();
            }
            // Move the Result rather than copying it into the by-value
            // parameter; it is not used past this point.
            return SetChunkUnlocked(chunk_index, std::move(maybe_array));
        }

        // Conversion failed, try another type
        infer_status_.LoosenType(maybe_array.status());
        TURBO_RETURN_NOT_OK(UpdateType());

        // Reconvert past finished chunks
        // (unfinished chunks will notice by themselves if they need reconverting)
        const auto nchunks = static_cast<int64_t>(chunks_.size());
        for (int64_t i = 0; i < nchunks; ++i) {
            if (i != chunk_index && chunks_[i]) {
                // We're assuming the chunk was converted using the wrong type
                // (which should be true unless the executor reorders tasks)
                chunks_[i].reset();
                // ScheduleConvertChunk must be called unlocked (see declaration).
                lock.unlock();
                ScheduleConvertChunk(i);
                lock.lock();
            }
        }

        // Reconvert this chunk
        lock.unlock();
        ScheduleConvertChunk(chunk_index);

        return turbo::OkStatus();
    }

    // Record the block's parser, reserve a chunk slot for it, then kick off an
    // asynchronous conversion attempt.
    void InferringColumnBuilder::insert(int64_t block_index,
                                        const std::shared_ptr<BlockParser> &parser) {
        const auto slot = static_cast<size_t>(block_index);
        {
            std::lock_guard<std::mutex> guard(mutex_);

            DKCHECK_NE(converter_, nullptr);
            // Grow the parser list so parsers_[slot] is addressable.
            if (parsers_.size() <= slot) {
                parsers_.resize(slot + 1);
            }
            // A given chunk must only be inserted once.
            DKCHECK_EQ(parsers_[slot], nullptr);
            parsers_[slot] = parser;
            ReserveChunksUnlocked(block_index);
        }

        // Must happen with mutex_ released (ScheduleConvertChunk contract).
        ScheduleConvertChunk(slot);
    }

    turbo::Result<std::shared_ptr<ChunkedArray>> InferringColumnBuilder::finish() {
        std::lock_guard<std::mutex> lock(mutex_);

        // No further reconversion can happen; release the retained parsers.
        parsers_.clear();
        return FinishUnlocked();
    }

    //////////////////////////////////////////////////////////////////////////
    // Factory functions

    // Factory for a column whose type is known up front.
    turbo::Result<std::shared_ptr<ColumnBuilder>> ColumnBuilder::create(
            MemoryPool *pool, const std::shared_ptr<DataType> &type, int32_t col_index,
            const ConvertOptions &options, const std::shared_ptr<TaskGroup> &task_group) {
        auto builder =
                std::make_shared<TypedColumnBuilder>(type, col_index, options, pool, task_group);
        // init() builds the converter; fail early if the type is unsupported.
        TURBO_RETURN_NOT_OK(builder->init());
        return builder;
    }

    // Factory for a column whose type must be inferred from the data.
    turbo::Result<std::shared_ptr<ColumnBuilder>> ColumnBuilder::create(
            MemoryPool *pool, int32_t col_index, const ConvertOptions &options,
            const std::shared_ptr<TaskGroup> &task_group) {
        auto builder =
                std::make_shared<InferringColumnBuilder>(col_index, options, pool, task_group);
        // init() seeds the converter from the initial inference state.
        TURBO_RETURN_NOT_OK(builder->init());
        return builder;
    }

    // Factory for an all-null column of a fixed type (column absent from the file).
    turbo::Result<std::shared_ptr<ColumnBuilder>> ColumnBuilder::MakeNull(
            MemoryPool *pool, const std::shared_ptr<DataType> &type,
            const std::shared_ptr<TaskGroup> &task_group) {
        return std::make_shared<NullColumnBuilder>(type, pool, task_group);
    }

}  // namespace nebula::csv
