// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/core/api.h>


#include <cstdint>
#include <iomanip>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

using nebula::Fp64Builder;
using nebula::Int64Builder;
using nebula::ListBuilder;

// While we want to use columnar data structures to build efficient operations, we
// often receive data in a row-wise fashion from other systems. In the following,
// we want to give a brief introduction to the classes provided by Apache Nebula by
// showing how to transform row-wise data into a columnar table.
//
// The table contains an id for a product, the number of components in the product
// and the cost of each component.
//
// The data in this example is stored in the following struct:
// One logical record: a product, how many components it has, and the cost of
// each component. In this example `component_cost` holds `components` entries,
// though that invariant is not enforced by the struct itself.
struct data_row {
    int64_t id;                          // unique product identifier
    int64_t components;                  // number of components in the product
    std::vector<double> component_cost;  // per-component cost; one entry per component
};

// Transforming a vector of structs into a columnar Table.
//
// The final representation should be an `nebula::Table` which in turn
// is made up of an `nebula::Schema` and a list of
// `nebula::ChunkedArray` instances. As the first step, we will iterate
// over the data and build up the arrays incrementally.  For this
// task, we provide `nebula::ArrayBuilder` classes that help in the
// construction of the final `nebula::Array` instances.
//
// For each type, Nebula has a specially typed builder class. For the primitive
// values `id` and `components` we can use the `nebula::Int64Builder`. For the
// `component_cost` vector, we need to have two builders, a top-level
// `nebula::ListBuilder` that builds the array of offsets and a nested
// `nebula::Fp64Builder` that constructs the underlying values array that
// is referenced by the offsets in the former array.
// Builds a columnar nebula::Table from the row-wise `rows` input.
//
// Three columns are produced: two Int64 columns ("id" and "components") and
// one List<Fp64> column ("component_cost"). The list column is assembled by a
// top-level ListBuilder, which records per-row offsets, wrapping a nested
// Fp64Builder that accumulates the flattened cost values.
turbo::Result<std::shared_ptr<nebula::Table>> VectorToColumnarTable(
        const std::vector<struct data_row> &rows) {
    // All builders draw from a single memory pool so allocation is centralised.
    nebula::MemoryPool *pool = nebula::default_memory_pool();

    Int64Builder id_builder(pool);
    Int64Builder components_builder(pool);
    ListBuilder component_cost_builder(pool, std::make_shared<Fp64Builder>(pool));
    // Non-owning view of the nested value builder; component_cost_builder
    // owns it and controls its lifetime.
    auto *item_cost_builder =
            static_cast<Fp64Builder *>(component_cost_builder.value_builder());

    // Append row by row. Any append can fail (e.g. allocation failure), so
    // every status is propagated to the caller immediately.
    for (const auto &row: rows) {
        TURBO_RETURN_NOT_OK(id_builder.append(row.id));
        TURBO_RETURN_NOT_OK(components_builder.append(row.components));
        // Open a new list slot, which memorises the current offset into the
        // underlying values array...
        TURBO_RETURN_NOT_OK(component_cost_builder.append());
        // ...then bulk-append this row's costs into the nested builder. The
        // memory layout of std::vector<double> matches what the builder
        // expects, so the data can be copied in one call.
        TURBO_RETURN_NOT_OK(item_cost_builder->append_values(
                row.component_cost.data(), row.component_cost.size()));
    }

    // Finalise each builder into an immutable array. Finishing the list
    // builder implicitly finishes its nested value builder, so
    // item_cost_builder needs no explicit finish call.
    std::shared_ptr<nebula::Array> id_array;
    std::shared_ptr<nebula::Array> components_array;
    std::shared_ptr<nebula::Array> component_cost_array;
    TURBO_RETURN_NOT_OK(id_builder.finish(&id_array));
    TURBO_RETURN_NOT_OK(components_builder.finish(&components_array));
    TURBO_RETURN_NOT_OK(component_cost_builder.finish(&component_cost_array));

    // Declare the schema describing the three arrays built above.
    auto schema = std::make_shared<nebula::Schema>(
            std::vector<std::shared_ptr<nebula::Field>>{
                    nebula::field("id", nebula::int64()),
                    nebula::field("components", nebula::int64()),
                    nebula::field("component_cost", nebula::list(nebula::float64()))});

    // The resulting table shares ownership of all referenced buffers, so it
    // remains valid after this function returns.
    return nebula::Table::create(schema, {id_array, components_array, component_cost_array});
}

// Converts a columnar table (as produced by VectorToColumnarTable) back into
// the row-wise `data_row` representation.
//
// Returns an invalid-argument error when the table's schema does not match
// the expected {id: int64, components: int64, component_cost: list<float64>}
// layout.
//
// NOTE(review): assumes each column consists of a single chunk and contains
// no null entries — fine for this example, but verify before reusing this on
// arbitrary tables.
turbo::Result<std::vector<data_row>> ColumnarTableToVector(
        const std::shared_ptr<nebula::Table> &table) {
    // Validate the input purely via its schema before touching any column.
    std::vector<std::shared_ptr<nebula::Field>> schema_vector = {
            nebula::field("id", nebula::int64()), nebula::field("components", nebula::int64()),
            nebula::field("component_cost", nebula::list(nebula::float64()))};
    auto expected_schema = std::make_shared<nebula::Schema>(schema_vector);

    if (!expected_schema->equals(*table->schema())) {
        // Wrong shape — bail out instead of reading columns that may not
        // exist or may have unexpected types.
        return turbo::invalid_argument_error("Schemas are not matching!");
    }

    // Unpack the underlying arrays. The primitive columns are read through
    // the typed value(i) accessors; the nested list column is read through
    // the raw values pointer of the flattened Fp64 child array.
    auto ids = std::static_pointer_cast<nebula::Int64Array>(table->column(0)->chunk(0));
    auto components =
            std::static_pointer_cast<nebula::Int64Array>(table->column(1)->chunk(0));
    auto component_cost =
            std::static_pointer_cast<nebula::ListArray>(table->column(2)->chunk(0));
    auto component_cost_values =
            std::static_pointer_cast<nebula::Fp64Array>(component_cost->values());
    // raw_values() is taken to already include any slicing offset of the
    // values array itself (higher level accessors like value(i) handle this
    // internally); value_offset(i)..value_offset(i+1) then delimits row i.
    const double *ccv_ptr = component_cost_values->raw_values();

    std::vector<data_row> rows;
    // Reserve up front: one allocation instead of repeated growth.
    rows.reserve(static_cast<size_t>(table->num_rows()));
    for (int64_t i = 0; i < table->num_rows(); i++) {
        // Simplification for this example: assume there are no null entries,
        // i.e. every row is filled with valid values.
        int64_t id = ids->value(i);
        int64_t component = components->value(i);
        const double *first = ccv_ptr + component_cost->value_offset(i);
        const double *last = ccv_ptr + component_cost->value_offset(i + 1);
        std::vector<double> components_vec(first, last);
        // Move the freshly built cost vector into place instead of copying it.
        rows.push_back({id, component, std::move(components_vec)});
    }

    return rows;
}

// Round-trips a small fixed dataset (rows -> columnar table -> rows) and
// prints the converted rows as an aligned text table.
//
// Returns an error status when either conversion fails or when the round
// trip loses or gains rows.
turbo::Status RunRowConversion() {
    std::vector<data_row> original_rows = {
            {1, 1, {10.0}},
            {2, 3, {11.0, 12.0, 13.0}},
            {3, 2, {15.0, 25.0}}};
    std::shared_ptr<nebula::Table> table;
    std::vector<data_row> converted_rows;

    TURBO_MOVE_OR_RAISE(table, VectorToColumnarTable(original_rows));

    TURBO_MOVE_OR_RAISE(converted_rows, ColumnarTableToVector(table));

    // Explicit status check instead of assert: assert is compiled out under
    // NDEBUG (and <cassert> was never included), so a failed round trip
    // would go unnoticed in release builds.
    if (original_rows.size() != converted_rows.size()) {
        return turbo::invalid_argument_error("Row count changed during conversion!");
    }

    // Print out contents of table, should get
    // ID Components Component prices
    // 1  1          10
    // 2  3          11  12  13
    // 3  2          15  25
    std::cout << std::left << std::setw(3) << "ID " << std::left << std::setw(11)
              << "Components " << std::left << std::setw(15) << "Component prices "
              << std::endl;
    for (const auto &row: converted_rows) {
        std::cout << std::left << std::setw(3) << row.id << std::left << std::setw(11)
                  << row.components;
        for (const auto &cost: row.component_cost) {
            std::cout << std::left << std::setw(4) << cost;
        }
        std::cout << std::endl;
    }
    return turbo::OkStatus();
}

// Entry point: runs the conversion demo and maps its status onto a process
// exit code, logging the error text on failure.
int main(int argc, char **argv) {
    (void) argc;  // command-line arguments are unused
    (void) argv;
    const auto status = RunRowConversion();
    if (status.ok()) {
        return EXIT_SUCCESS;
    }
    std::cerr << status.to_string() << std::endl;
    return EXIT_FAILURE;
}
