// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#define RAPIDJSON_HAS_STDSTRING 1

#include <nebula/core/api.h>

#include <nebula/core/table_builder.h>
#include <nebula/types/type_traits.h>
#include <turbo/functional/iterator.h>
#include <turbo/log/logging.h>
#include <nebula/core/visit_array_inline.h>

#include <merak/json.h>
#include <cassert>
#include <iostream>
#include <vector>

// Transforming dynamic row data into Nebula data
// When building connectors to other data systems, it's common to receive data in
// row-based structures. While the row_wise_conversion_example.cc shows how to
// handle this conversion for fixed schemas, this example demonstrates how to
// write converters for arbitrary schemas.
//
// As an example, this conversion is between Nebula and merak::json::Documents.
//
// We use the following helpers and patterns here:
//  * nebula::visit_array_inline and nebula::visit_type_inline for implementing a visitor
//    pattern with Nebula to handle different array types
//  * nebula::enable_if_primitive_ctype to create a template method that handles
//    conversion for Nebula types that have corresponding C types (bool, integer,
//    float).

// Shared sentinel returned when a requested field is absent from a document;
// callers compare/inspect it as a JSON null without allocating per lookup.
const merak::json::Value kNullJsonSingleton = merak::json::Value();

/// \brief Builder that holds state for a single conversion.
///
/// Implements Visit() methods for each type of Nebula Array that set the values
/// of the corresponding fields in each row.
///
/// Usage: construct with the row count, then for each column call set_field()
/// followed by nebula::visit_array_inline(array, this); finally take the
/// documents with std::move(builder).Rows().
class RowBatchBuilder {
public:
    explicit RowBatchBuilder(int64_t num_rows) : field_(nullptr) {
        // Reserve all of the space required up-front to avoid unnecessary resizing
        rows_.reserve(num_rows);

        // Each row starts as an empty JSON object; the Visit() overloads add
        // one member per converted column.
        for (int64_t i = 0; i < num_rows; ++i) {
            rows_.push_back(merak::json::Document());
            rows_[i].SetObject();
        }
    }

    /// \brief Set which field to convert.
    ///
    /// The pointer is borrowed; the caller must keep the Field alive through
    /// the following visit_array_inline() call.
    void set_field(const nebula::Field *field) { field_ = field; }

    /// \brief Retrieve converted rows from builder.
    ///
    /// Ref-qualified (&&) so rows can only be taken from an rvalue builder,
    /// making the move-out explicit at the call site.
    std::vector<merak::json::Document> Rows() &&{ return std::move(rows_); }

    // Default implementation: any array type without a dedicated overload
    // below is reported as unsupported.
    turbo::Status Visit(const nebula::Array &array) {
        return turbo::unimplemented_error(
                "Cannot convert to json document for array of type ", array.type()->to_string());
    }

    // Handles booleans, integers, floats
    // (enabled only for Nebula types that have a corresponding primitive C type).
    template<typename ArrayType, typename DataClass = typename ArrayType::TypeClass>
    nebula::enable_if_primitive_ctype<DataClass, turbo::Status> Visit(
            const ArrayType &array) {
        assert(static_cast<int64_t>(rows_.size()) == array.length());
        for (int64_t i = 0; i < array.length(); ++i) {
            // Null slots are simply omitted from the JSON object.
            if (!array.is_null(i)) {
                merak::json::Value str_key(field_->name(), rows_[i].get_allocator());
                rows_[i].add_member(str_key, array.value(i), rows_[i].get_allocator());
            }
        }
        return turbo::OkStatus();
    }

    turbo::Status Visit(const nebula::StringArray &array) {
        assert(static_cast<int64_t>(rows_.size()) == array.length());
        for (int64_t i = 0; i < array.length(); ++i) {
            if (!array.is_null(i)) {
                merak::json::Value str_key(field_->name(), rows_[i].get_allocator());
                // set_string with an allocator copies the bytes into the
                // document: value_view only borrows the Nebula array's buffer.
                std::string_view value_view = array.value(i);
                merak::json::Value value;
                value.set_string(value_view.data(),
                                static_cast<merak::json::SizeType>(value_view.size()),
                                rows_[i].get_allocator());
                rows_[i].add_member(str_key, value, rows_[i].get_allocator());
            }
        }
        return turbo::OkStatus();
    }

    turbo::Status Visit(const nebula::StructArray &array) {
        const nebula::StructType *type = array.struct_type();

        assert(static_cast<int64_t>(rows_.size()) == array.length());

        // Convert every child field into its own set of row documents first,
        // then merge each child document into the parent row as one member.
        RowBatchBuilder child_builder(rows_.size());
        for (int i = 0; i < type->num_fields(); ++i) {
            const nebula::Field *child_field = type->field(i).get();
            child_builder.set_field(child_field);
            TURBO_RETURN_NOT_OK(nebula::visit_array_inline(*array.field(i).get(), &child_builder));
        }
        std::vector<merak::json::Document> rows = std::move(child_builder).Rows();

        for (int64_t i = 0; i < array.length(); ++i) {
            if (!array.is_null(i)) {
                merak::json::Value str_key(field_->name(), rows_[i].get_allocator());
                // Must copy value to new allocator
                merak::json::Value row_val;
                row_val.copy_from(rows[i], rows_[i].get_allocator());
                rows_[i].add_member(str_key, row_val, rows_[i].get_allocator());
            }
        }
        return turbo::OkStatus();
    }

    turbo::Status Visit(const nebula::ListArray &array) {
        assert(static_cast<int64_t>(rows_.size()) == array.length());
        // First create rows from values
        std::shared_ptr<nebula::Array> values = array.values();
        RowBatchBuilder child_builder(values->length());
        const nebula::Field *value_field = array.list_type()->value_field().get();
        std::string value_field_name = value_field->name();
        child_builder.set_field(value_field);
        TURBO_RETURN_NOT_OK(nebula::visit_array_inline(*values.get(), &child_builder));

        std::vector<merak::json::Document> rows = std::move(child_builder).Rows();

        // values_i walks the flattened child rows in lockstep with the list
        // offsets. NOTE(review): assumes null list slots contribute zero
        // values (value_length == 0), otherwise values_i would desync —
        // confirm against the writer that produced the array.
        int64_t values_i = 0;
        for (int64_t i = 0; i < array.length(); ++i) {
            if (array.is_null(i)) continue;

            merak::json::Document::AllocatorType &allocator = rows_[i].get_allocator();
            auto array_len = array.value_length(i);

            merak::json::Value value;
            value.set_array();
            value.reserve(array_len, allocator);

            for (int64_t j = 0; j < array_len; ++j) {
                merak::json::Value row_val;
                // Must copy value to new allocator
                row_val.copy_from(rows[values_i][value_field_name], allocator);
                value.push_back(row_val, allocator);
                ++values_i;
            }

            merak::json::Value str_key(field_->name(), allocator);
            rows_[i].add_member(str_key, value, allocator);
        }

        return turbo::OkStatus();
    }

private:
    const nebula::Field *field_;                 // current column; borrowed, set via set_field()
    std::vector<merak::json::Document> rows_;    // one JSON object per output row
};  // RowBatchBuilder

class ArrowToDocumentConverter {
public:
    /// Convert a single batch of Nebula data into Documents (one per row).
    turbo::Result<std::vector<merak::json::Document>> ConvertToVector(
            std::shared_ptr<nebula::RecordBatch> batch) {
        RowBatchBuilder row_builder{batch->num_rows()};

        const int num_columns = batch->num_columns();
        for (int col = 0; col < num_columns; ++col) {
            row_builder.set_field(batch->schema()->field(col).get());
            TURBO_RETURN_NOT_OK(
                    nebula::visit_array_inline(*batch->column(col).get(), &row_builder));
        }

        return std::move(row_builder).Rows();
    }

    /// Convert a Nebula table into an iterator of Documents
    turbo::Iterator<merak::json::Document> ConvertToIterator(
            std::shared_ptr<nebula::Table> table, size_t batch_size) {
        // Use TableBatchReader to divide table into smaller batches. The batches
        // created are zero-copy slices with *at most* `batch_size` rows.
        auto reader = std::make_shared<nebula::TableBatchReader>(*table);
        reader->set_chunk_size(batch_size);

        // Each batch is converted into its own iterator of documents...
        auto batch_to_docs = [this](const std::shared_ptr<nebula::RecordBatch> &batch)
                -> turbo::Result<turbo::Iterator<merak::json::Document>> {
            TURBO_MOVE_OR_RAISE(auto rows, ConvertToVector(batch));
            return turbo::make_vector_iterator(std::move(rows));
        };

        // ...and the per-batch iterators are flattened into a single stream.
        auto per_batch_iters = turbo::make_maybe_map_iterator(
                batch_to_docs, turbo::make_iterator_from_reader(std::move(reader)));

        return turbo::make_flatten_iterator(std::move(per_batch_iters));
    }
};  // ArrowToDocumentConverter

/// \brief turbo::Iterator over rows values of a document for a given field
///
/// path and array_levels are used to address each field in a JSON document. As
/// an example, consider this JSON document:
/// {
///     "x": 3,                   // path: ["x"],             array_levels: 0
///     "files": [                // path: ["files"],         array_levels: 0
///         {                     // path: ["files"],         array_levels: 1
///             "path": "my_str", // path: ["files", "path"], array_levels: 1
///             "sizes": [        // path: ["files", "sizes"], array_levels: 1
///                 20,           // path: ["files", "sizes"], array_levels: 2
///                 22
///             ]
///         }
///     ]
/// },
class DocValuesIterator {
public:
    /// \param rows vector of rows
    /// \param path field names to enter
    /// \param array_levels number of arrays to enter
    DocValuesIterator(const std::vector<merak::json::Document> &rows,
                      std::vector<std::string> path, int64_t array_levels)
            : rows(rows), path(std::move(path)), array_levels(array_levels) {}

    /// \brief Advance to the next array element (backtracking through the
    /// array stack), or to the next row once all open arrays are exhausted.
    ///
    /// \param value current position (only used as a pass-through)
    /// \param path_i [out] path depth of the returned position
    /// \param arr_i [out] number of array levels entered at the returned position
    /// \return the next position, or nullptr when all rows are consumed
    const merak::json::Value *NextArrayOrRow(const merak::json::Value *value, size_t *path_i,
                                           int64_t *arr_i) {
        while (array_stack.size() > 0) {
            ArrayPosition &pos = array_stack.back();
            // Try to get next position in Array
            if (pos.index + 1 < pos.array_node->size()) {
                ++pos.index;
                value = &(*pos.array_node)[pos.index];
                *path_i = pos.path_index;
                *arr_i = array_stack.size();
                return value;
            } else {
                // This array is exhausted; pop and resume the next outer one.
                array_stack.pop_back();
            }
        }
        // No open arrays remain: move on to the next document (row).
        ++row_i;
        if (row_i < rows.size()) {
            value = static_cast<const merak::json::Value *>(&rows[row_i]);
        } else {
            value = nullptr;  // iteration finished
        }
        *path_i = 0;
        *arr_i = 0;
        return value;
    }

    /// \brief Produce the next value at the configured path and array depth.
    ///
    /// \return pointer to the next value, &kNullJsonSingleton when the field
    /// is absent from the current document, or nullptr when iteration ends.
    turbo::Result<const merak::json::Value *> next() {
        const merak::json::Value *value = nullptr;
        size_t path_i;
        int64_t arr_i;
        // Can either start at document or at last array level
        if (array_stack.size() > 0) {
            auto pos = array_stack.back();
            value = pos.array_node;
            path_i = pos.path_index;
            arr_i = array_stack.size() - 1;
        }

        value = NextArrayOrRow(value, &path_i, &arr_i);

        // Traverse to desired level (with possible backtracking as needed)
        while (path_i < path.size() || arr_i < array_levels) {
            if (value == nullptr) {
                return value;  // all rows consumed
            } else if (value->is_array() && value->size() > 0) {
                // Enter the array; remember it so later elements are visited
                // on subsequent next() calls.
                ArrayPosition pos;
                pos.array_node = value;
                pos.path_index = path_i;
                pos.index = 0;
                array_stack.push_back(pos);

                value = &(*value)[0];
                ++arr_i;
            } else if (value->is_array()) {
                // Empty array means we need to backtrack and go to next array or row
                value = NextArrayOrRow(value, &path_i, &arr_i);
            } else if (value->has_member(path[path_i])) {
                // Descend one level along the field path.
                value = &(*value)[path[path_i]];
                ++path_i;
            } else {
                // Field missing in this document: surface an explicit null.
                return &kNullJsonSingleton;
            }
        }

        // Return value
        return value;
    }

private:
    const std::vector<merak::json::Document> &rows;  // borrowed; must outlive the iterator
    std::vector<std::string> path;
    int64_t array_levels;
    // Deliberately initialized to -1 (wraps to SIZE_MAX) so the first ++row_i
    // in NextArrayOrRow() lands on row 0.
    size_t row_i = -1;  // index of current row

    // Info about array position for one array level in array stack
    struct ArrayPosition {
        const merak::json::Value *array_node;  // array being traversed (borrowed)
        int64_t path_index;                    // path depth at which this array sits
        merak::json::SizeType index;           // current element index within the array
    };
    std::vector<ArrayPosition> array_stack;
};

/// \brief Appends values taken from JSON documents to Nebula array builders.
///
/// One converter handles one field at a time: Convert() records the member
/// name and destination builder, then dispatches on the field's Nebula type
/// via nebula::visit_type_inline to the matching Visit() overload. Nested
/// struct/list fields recurse with a child converter carrying an extended
/// root path and (for lists) an extra array level.
class JsonValueConverter {
public:
    explicit JsonValueConverter(const std::vector<merak::json::Document> &rows)
            : rows_(rows), array_levels_(0) {}

    JsonValueConverter(const std::vector<merak::json::Document> &rows,
                       const std::vector<std::string> &root_path, int64_t array_levels)
            : rows_(rows), root_path_(root_path), array_levels_(array_levels) {}

    /// \brief For field passed in, append corresponding values to builder
    turbo::Status Convert(const nebula::Field &field, nebula::ArrayBuilder *builder) {
        return Convert(field, field.name(), builder);
    }

    /// \brief For field passed in, append corresponding values to builder
    ///
    /// \param field schema field being converted (drives the type dispatch)
    /// \param field_name JSON member name to read; may differ from
    ///        field.name() (e.g. "" when converting list value elements)
    /// \param builder destination builder; must match the field's type
    turbo::Status Convert(const nebula::Field &field, const std::string &field_name,
                          nebula::ArrayBuilder *builder) {
        field_name_ = field_name;
        builder_ = builder;
        TURBO_RETURN_NOT_OK(nebula::visit_type_inline(*field.type().get(), this));
        return turbo::OkStatus();
    }

    // Default implementation: types without a dedicated overload are unsupported.
    turbo::Status Visit(const nebula::DataType &type) {
        return turbo::unimplemented_error(
                "Cannot convert json value to Nebula array of type ", type.to_string());
    }

    turbo::Status Visit(const nebula::Int64Type &type) {
        nebula::Int64Builder *builder = static_cast<nebula::Int64Builder *>(builder_);
        for (const auto &maybe_value: FieldValues()) {
            TURBO_MOVE_OR_RAISE(auto value, maybe_value);
            if (value->is_null()) {
                TURBO_RETURN_NOT_OK(builder->append_null());
            } else {
                // JSON integers may be parsed as any of the four widths, so
                // accept each representation explicitly.
                if (value->is_uint32()) {
                    TURBO_RETURN_NOT_OK(builder->append(value->get_uint32()));
                } else if (value->is_int32()) {
                    TURBO_RETURN_NOT_OK(builder->append(value->get_int32()));
                } else if (value->is_uint64()) {
                    TURBO_RETURN_NOT_OK(builder->append(value->get_uint64()));
                } else if (value->is_int64()) {
                    TURBO_RETURN_NOT_OK(builder->append(value->get_int64()));
                } else {
                    return turbo::invalid_argument_error("Value is not an integer");
                }
            }
        }
        return turbo::OkStatus();
    }

    turbo::Status Visit(const nebula::Fp64Type &type) {
        nebula::Fp64Builder *builder = static_cast<nebula::Fp64Builder *>(builder_);
        for (const auto &maybe_value: FieldValues()) {
            TURBO_MOVE_OR_RAISE(auto value, maybe_value);
            if (value->is_null()) {
                TURBO_RETURN_NOT_OK(builder->append_null());
            } else {
                TURBO_RETURN_NOT_OK(builder->append(value->get_double()));
            }
        }
        return turbo::OkStatus();
    }

    turbo::Status Visit(const nebula::StringType &type) {
        nebula::StringBuilder *builder = static_cast<nebula::StringBuilder *>(builder_);
        for (const auto &maybe_value: FieldValues()) {
            TURBO_MOVE_OR_RAISE(auto value, maybe_value);
            if (value->is_null()) {
                TURBO_RETURN_NOT_OK(builder->append_null());
            } else {
                TURBO_RETURN_NOT_OK(builder->append(value->get_string()));
            }
        }
        return turbo::OkStatus();
    }

    turbo::Status Visit(const nebula::BooleanType &type) {
        nebula::BooleanBuilder *builder = static_cast<nebula::BooleanBuilder *>(builder_);
        for (const auto &maybe_value: FieldValues()) {
            TURBO_MOVE_OR_RAISE(auto value, maybe_value);
            if (value->is_null()) {
                TURBO_RETURN_NOT_OK(builder->append_null());
            } else {
                TURBO_RETURN_NOT_OK(builder->append(value->get_bool()));
            }
        }
        return turbo::OkStatus();
    }

    turbo::Status Visit(const nebula::StructType &type) {
        nebula::StructBuilder *builder = static_cast<nebula::StructBuilder *>(builder_);

        // Children converge on this struct's member, so extend the root path.
        std::vector<std::string> child_path(root_path_);
        if (field_name_.size() > 0) {
            child_path.push_back(field_name_);
        }
        auto child_converter = JsonValueConverter(rows_, child_path, array_levels_);

        for (int i = 0; i < type.num_fields(); ++i) {
            std::shared_ptr<nebula::Field> child_field = type.field(i);
            std::shared_ptr<nebula::ArrayBuilder> child_builder = builder->child_builder(i);

            TURBO_RETURN_NOT_OK(
                    child_converter.Convert(*child_field.get(), child_builder.get()));
        }

        // Make null bitmap
        for (const auto &maybe_value: FieldValues()) {
            TURBO_MOVE_OR_RAISE(auto value, maybe_value);
            TURBO_RETURN_NOT_OK(builder->append(!value->is_null()));
        }

        return turbo::OkStatus();
    }

    turbo::Status Visit(const nebula::ListType &type) {
        nebula::ListBuilder *builder = static_cast<nebula::ListBuilder *>(builder_);

        // Values and offsets needs to be interleaved in ListBuilder, so first collect the
        // values
        std::unique_ptr<nebula::ArrayBuilder> tmp_value_builder;
        TURBO_MOVE_OR_RAISE(tmp_value_builder,
                            nebula::MakeBuilder(builder->value_builder()->type()));
        std::vector<std::string> child_path(root_path_);
        child_path.push_back(field_name_);
        // array_levels_ + 1: values live one array deeper than the list field.
        auto child_converter = JsonValueConverter(rows_, child_path, array_levels_ + 1);
        TURBO_RETURN_NOT_OK(
                child_converter.Convert(*type.value_field().get(), "", tmp_value_builder.get()));

        std::shared_ptr<nebula::Array> values_array;
        TURBO_RETURN_NOT_OK(tmp_value_builder->finish(&values_array));
        std::shared_ptr<nebula::ArrayData> values_data = values_array->data();

        // Now interleave: per row, append validity then slice the collected
        // values into the real value builder, advancing the flat offset.
        nebula::ArrayBuilder *value_builder = builder->value_builder();
        int64_t offset = 0;
        for (const auto &maybe_value: FieldValues()) {
            TURBO_MOVE_OR_RAISE(auto value, maybe_value);
            TURBO_RETURN_NOT_OK(builder->append(!value->is_null()));
            if (!value->is_null() && value->size() > 0) {
                TURBO_RETURN_NOT_OK(
                        value_builder->append_array_slice(*values_data.get(), offset, value->size()));
                offset += value->size();
            }
        }

        return turbo::OkStatus();
    }

private:
    std::string field_name_;                          // current JSON member name ("" = the value itself)
    nebula::ArrayBuilder *builder_ = nullptr;         // borrowed; set by Convert() before every visit
    const std::vector<merak::json::Document> &rows_;  // row-oriented source data (borrowed)
    std::vector<std::string> root_path_;              // member names leading to this field
    int64_t array_levels_;                            // number of arrays entered above this field

    /// Return a flattened iterator over values at nested location
    turbo::Iterator<const merak::json::Value *> FieldValues() {
        std::vector<std::string> path(root_path_);
        if (field_name_.size() > 0) {
            path.push_back(field_name_);
        }
        auto iter = DocValuesIterator(rows_, std::move(path), array_levels_);
        // The iterator is captured by copy so each FieldValues() call yields
        // an independent traversal over the rows.
        auto fn = [iter]() mutable -> turbo::Result<const merak::json::Value *> {
            return iter.next();
        };

        return turbo::make_function_iterator(fn);
    }
};  // JsonValueConverter

/// \brief Convert row-oriented JSON documents into a columnar RecordBatch.
///
/// \param rows input documents; each document supplies one output row
/// \param schema target schema; every field is converted in declaration order
/// \return the assembled batch, or an error Status if a value cannot be
///         converted or the finished batch fails validation
turbo::Result<std::shared_ptr<nebula::RecordBatch>> ConvertToRecordBatch(
        const std::vector<merak::json::Document> &rows, std::shared_ptr<nebula::Schema> schema) {
    // RecordBatchBuilder will create array builders for us for each field in our
    // schema. By passing the number of output rows (`rows.size()`) we can
    // pre-allocate the correct size of arrays, except of course in the case of
    // string, byte, and list arrays, which have dynamic lengths.
    std::unique_ptr<nebula::RecordBatchBuilder> batch_builder;
    TURBO_MOVE_OR_RAISE(
            batch_builder,
            nebula::RecordBatchBuilder::create(schema, nebula::default_memory_pool(), rows.size()));

    // Inner converter will take rows and be responsible for appending values
    // to provided array builders.
    JsonValueConverter converter(rows);
    for (int i = 0; i < batch_builder->num_fields(); ++i) {
        std::shared_ptr<nebula::Field> field = schema->field(i);
        nebula::ArrayBuilder *builder = batch_builder->get_field(i);
        TURBO_RETURN_NOT_OK(converter.Convert(*field.get(), builder));
    }

    std::shared_ptr<nebula::RecordBatch> batch;
    TURBO_MOVE_OR_RAISE(batch, batch_builder->flush());

    // Use RecordBatch::validate_full() to make sure arrays were correctly
    // constructed. Propagate failures as a Status instead of aborting
    // (KCHECK_OK) — this function already returns a Result, so callers can
    // handle the error gracefully.
    TURBO_RETURN_NOT_OK(batch->validate_full());
    return batch;
}  // ConvertToRecordBatch

/// \brief End-to-end demo: JSON rows -> Nebula table -> JSON rows.
///
/// \param num_rows number of JSON records to generate (cycling over three
///        fixed templates); must be >= 0
/// \param batch_size maximum rows per RecordBatch when reading the table
///        back out; must be > 0
/// \return OkStatus on success, or the first conversion/validation error
turbo::Status DoRowConversion(int32_t num_rows, int32_t batch_size) {
    // Guard the arguments: a negative num_rows would be converted to a huge
    // size_t in records.reserve() below, and a non-positive batch_size makes
    // no sense for TableBatchReader chunking.
    if (num_rows < 0 || batch_size <= 0) {
        return turbo::invalid_argument_error("num_rows must be >= 0 and batch_size must be > 0");
    }

    //(Doc section: Convert to Nebula)
    // write JSON records
    std::vector<std::string> json_records = {
            R"({"pk": 1, "date_created": "2020-10-01", "data": {"deleted": true, "metrics": [{"key": "x", "value": 1}]}})",
            R"({"pk": 2, "date_created": "2020-10-03", "data": {"deleted": false, "metrics": []}})",
            R"({"pk": 3, "date_created": "2020-10-05", "data": {"deleted": false, "metrics": [{"key": "x", "value": 33}, {"key": "x", "value": 42}]}})"};

    // Parse num_rows documents, cycling over the three templates above.
    std::vector<merak::json::Document> records;
    records.reserve(num_rows);
    for (int32_t i = 0; i < num_rows; ++i) {
        merak::json::Document document;
        document.parse(json_records[i % json_records.size()]);
        records.push_back(std::move(document));
    }

    // Echo the input documents for comparison with the round-tripped output.
    for (const merak::json::Document &doc: records) {
        merak::json::StringBuffer sb;
        merak::json::Writer<merak::json::StringBuffer> writer(sb);
        doc.accept(writer);
        std::cout << sb.get_string() << std::endl;
    }

    // Schema mirrors the JSON structure: pk, date_created, and a nested
    // "data" struct containing a list of {key, value} metric structs.
    auto tags_schema = nebula::list(nebula::STRUCT({
                                                            nebula::field("key", nebula::utf8()),
                                                            nebula::field("value", nebula::int64()),
                                                    }));
    auto schema = nebula::schema(
            {nebula::field("pk", nebula::int64()), nebula::field("date_created", nebula::utf8()),
             nebula::field("data", nebula::STRUCT({nebula::field("deleted", nebula::boolean()),
                                                    nebula::field("metrics", tags_schema)}))});

    // Convert records into a table
    TURBO_MOVE_OR_RAISE(std::shared_ptr<nebula::RecordBatch> batch,
                           ConvertToRecordBatch(records, schema));

    TURBO_MOVE_OR_RAISE(std::shared_ptr<nebula::Table> table,
                           nebula::Table::from_record_batches({batch}));

    // Print table
    std::cout << table->to_string() << std::endl;
    TURBO_RETURN_NOT_OK(table->validate_full());
    //(Doc section: Convert to Nebula)

    //(Doc section: Convert to Rows)
    // create converter
    ArrowToDocumentConverter to_doc_converter;

    // Convert table into document (row) iterator
    turbo::Iterator<merak::json::Document> document_iter =
            to_doc_converter.ConvertToIterator(table, batch_size);

    // Print each row, sanity-checking the round-tripped structure first.
    for (turbo::Result<merak::json::Document> doc_result: document_iter) {
        TURBO_MOVE_OR_RAISE(merak::json::Document doc, std::move(doc_result));

        assert(doc.has_member("pk"));
        assert(doc["pk"].is_int64());
        assert(doc.has_member("date_created"));
        assert(doc["date_created"].is_string());
        assert(doc.has_member("data"));
        assert(doc["data"].is_object());
        assert(doc["data"].has_member("deleted"));
        assert(doc["data"]["deleted"].is_bool());
        assert(doc["data"].has_member("metrics"));
        assert(doc["data"]["metrics"].is_array());
        if (doc["data"]["metrics"].size() > 0) {
            auto metric = &doc["data"]["metrics"][0];
            assert(metric->is_object());
            assert(metric->has_member("key"));
            assert((*metric)["key"].is_string());
            assert(metric->has_member("value"));
            assert((*metric)["value"].is_int64());
        }

        merak::json::StringBuffer sb;
        merak::json::Writer<merak::json::StringBuffer> writer(sb);
        doc.accept(writer);
        std::cout << sb.get_string() << std::endl;
    }
    //(Doc section: Convert to Rows)

    return turbo::OkStatus();
}

int main(int argc, char **argv) {
    int32_t num_rows = argc > 1 ? std::atoi(argv[1]) : 100;
    int32_t batch_size = argc > 2 ? std::atoi(argv[2]) : 100;

    turbo::Status status = DoRowConversion(num_rows, batch_size);

    if (!status.ok()) {
        std::cerr << "Error occurred: " << status.message() << std::endl;
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
