// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <limits>
#include <numeric>
#include <sstream>

#include <nebula/array/array_nested.h>
#include <nebula/array/array_primitive.h>
#include <nebula/extension/fixed_shape_tensor.h>
#include <nebula/extension/tensor_internal.h>
#include <nebula/json/rapidjson_defs.h>  // IWYU pragma: keep
#include <nebula/numeric/int_util_overflow.h>
#include <nebula/types/scalar.h>
#include <nebula/types/tensor.h>
#include <nebula/util/print.h>
#include <nebula/util/string.h>

#include <turbo/algorithm/algorithm.h>
#include <turbo/log/logging.h>

#include <merak/json.h>


namespace nebula {
    namespace extension {
        namespace {
            // Computes the per-dimension byte strides for a tensor of the given
            // fixed-width `type` and `shape` whose physical layout is described by
            // `permutation`. One stride per dimension is appended to `*strides`
            // (callers are expected to pass an empty vector).
            //
            // Returns an error if the total byte extent overflows int64.
            turbo::Status ComputeStrides(const FixedWidthType &type, const std::vector<int64_t> &shape,
                                         const std::vector<int64_t> &permutation,
                                         std::vector<int64_t> *strides) {
                // An empty permutation means plain row-major (C-contiguous) layout.
                if (permutation.empty()) {
                    return internal::ComputeRowMajorStrides(type, shape, strides);
                }

                const int byte_width = type.byte_width();

                // `remaining` becomes byte_width * product(shape[i]) over every
                // permutation entry except i == 0, i.e. the byte extent of one slice
                // along the major-most dimension. It stays 0 for degenerate shapes
                // (empty shape, or a zero-length leading dimension).
                int64_t remaining = 0;
                if (!shape.empty() && shape.front() > 0) {
                    remaining = byte_width;
                    for (auto i: permutation) {
                        if (i > 0) {
                            if (internal::MultiplyWithOverflow(remaining, shape[i], &remaining)) {
                                return turbo::invalid_argument_error(
                                    "Strides computed from shape would not fit in 64-bit integer");
                            }
                        }
                    }
                }

                // Degenerate (zero-size) shape: every stride is the element width.
                if (remaining == 0) {
                    strides->assign(shape.size(), byte_width);
                    return turbo::OkStatus();
                }

                // Emit strides in physical (major-to-minor) order: each subsequent
                // physical dimension divides out one more extent...
                strides->push_back(remaining);
                for (auto i: permutation) {
                    if (i > 0) {
                        remaining /= shape[i];
                        strides->push_back(remaining);
                    }
                }
                // ...then map them from permutation order back to logical dimension
                // order (presumably what turbo::permute does here — it is the same
                // helper the rest of this file uses for shape/dim_names).
                turbo::permute(permutation, strides);

                return turbo::OkStatus();
            }
        } // namespace

        bool FixedShapeTensorType::ExtensionEquals(const ExtensionType &other) const {
            if (extension_name() != other.extension_name()) {
                return false;
            }
            const auto &other_ext = turbo::checked_cast<const FixedShapeTensorType &>(other);

            auto is_permutation_trivial = [](const std::vector<int64_t> &permutation) {
                for (size_t i = 1; i < permutation.size(); ++i) {
                    if (permutation[i - 1] + 1 != permutation[i]) {
                        return false;
                    }
                }
                return true;
            };
            const bool permutation_equivalent =
            ((permutation_ == other_ext.permutation()) ||
             (permutation_.empty() && is_permutation_trivial(other_ext.permutation())) ||
             (is_permutation_trivial(permutation_) && other_ext.permutation().empty()));

            return (storage_type()->equals(other_ext.storage_type())) &&
                   (this->shape() == other_ext.shape()) && (dim_names_ == other_ext.dim_names()) &&
                   permutation_equivalent;
        }

        std::string FixedShapeTensorType::to_string(bool show_metadata) const {
            std::stringstream ss;
            ss << "extension<" << this->extension_name()
                    << "[value_type=" << value_type_->to_string(show_metadata)
                    << ", shape=" << ::nebula::internal::PrintVector{shape_, ","};

            if (!permutation_.empty()) {
                ss << ", permutation=" << ::nebula::internal::PrintVector{permutation_, ","};
            }
            if (!dim_names_.empty()) {
                ss << ", dim_names=[" << internal::JoinStrings(dim_names_, ",") << "]";
            }
            ss << "]>";
            return ss.str();
        }

        std::string FixedShapeTensorType::serialize() const {
            merak::json::Document document;
            document.SetObject();
            merak::json::Document::AllocatorType &allocator = document.get_allocator();

            merak::json::Value shape(merak::json::kArrayType);
            for (auto v: shape_) {
                shape.push_back(v, allocator);
            }
            document.add_member(merak::json::Value("shape", allocator), shape, allocator);

            if (!permutation_.empty()) {
                merak::json::Value permutation(merak::json::kArrayType);
                for (auto v: permutation_) {
                    permutation.push_back(v, allocator);
                }
                document.add_member(merak::json::Value("permutation", allocator), permutation, allocator);
            }

            if (!dim_names_.empty()) {
                merak::json::Value dim_names(merak::json::kArrayType);
                for (const std::string &v: dim_names_) {
                    dim_names.push_back(merak::json::Value{}.set_string(v.c_str(), allocator), allocator);
                }
                document.add_member(merak::json::Value("dim_names", allocator), dim_names, allocator);
            }

            merak::json::StringBuffer buffer;
            merak::json::Writer<merak::json::StringBuffer> writer(buffer);
            document.accept(writer);
            return buffer.get_string();
        }

        // Inverse of serialize(): rebuilds the extension type from its JSON
        // parameters. `serialized_data` arrives via (potentially untrusted)
        // extension metadata, so every field and element is type-checked before
        // use — previously `get_array()`/`get_int64()`/`get_string()` were called
        // unconditionally, which asserts / is UB in rapidjson-style libraries
        // when the value has the wrong JSON type.
        turbo::Result<std::shared_ptr<DataType> > FixedShapeTensorType::deserialize(
            std::shared_ptr<DataType> storage_type, const std::string &serialized_data) const {
            if (storage_type->id() != Type::FIXED_SIZE_LIST) {
                return turbo::invalid_argument_error("Expected FixedSizeList storage type, got ",
                                                     storage_type->to_string());
            }
            auto value_type =
                    turbo::checked_pointer_cast<FixedSizeListType>(storage_type)->get_value_type();
            merak::json::Document document;
            if (document.parse(serialized_data.data(), serialized_data.length()).has_parse_error() ||
                !document.has_member("shape") || !document["shape"].is_array()) {
                return turbo::invalid_argument_error("Invalid serialized JSON data: ", serialized_data);
            }

            std::vector<int64_t> shape;
            for (auto &x: document["shape"].get_array()) {
                if (!x.is_int64()) {
                    return turbo::invalid_argument_error("Invalid serialized JSON data: ",
                                                         serialized_data);
                }
                shape.emplace_back(x.get_int64());
            }
            std::vector<int64_t> permutation;
            if (document.has_member("permutation")) {
                if (!document["permutation"].is_array()) {
                    return turbo::invalid_argument_error("Invalid serialized JSON data: ",
                                                         serialized_data);
                }
                for (auto &x: document["permutation"].get_array()) {
                    if (!x.is_int64()) {
                        return turbo::invalid_argument_error("Invalid serialized JSON data: ",
                                                             serialized_data);
                    }
                    permutation.emplace_back(x.get_int64());
                }
                // A permutation must supply exactly one index per dimension; its
                // contents are re-validated by the factory below.
                if (shape.size() != permutation.size()) {
                    return turbo::invalid_argument_error("Invalid permutation");
                }
            }
            std::vector<std::string> dim_names;
            if (document.has_member("dim_names")) {
                if (!document["dim_names"].is_array()) {
                    return turbo::invalid_argument_error("Invalid serialized JSON data: ",
                                                         serialized_data);
                }
                for (auto &x: document["dim_names"].get_array()) {
                    if (!x.is_string()) {
                        return turbo::invalid_argument_error("Invalid serialized JSON data: ",
                                                             serialized_data);
                    }
                    dim_names.emplace_back(x.get_string());
                }
                if (shape.size() != dim_names.size()) {
                    return turbo::invalid_argument_error("Invalid dim_names");
                }
            }

            return fixed_shape_tensor(value_type, shape, permutation, dim_names);
        }

        std::shared_ptr<Array> FixedShapeTensorType::make_array(
            std::shared_ptr<ArrayData> data) const {
            DKCHECK_EQ(data->type->id(), Type::EXTENSION);
            DKCHECK_EQ("nebula.fixed_shape_tensor",
                       turbo::checked_cast<const ExtensionType &>(*data->type).extension_name());
            return std::make_shared<ExtensionArray>(data);
        }

        // Converts a single non-null extension scalar (one fixed-shape tensor
        // cell) into a Tensor viewing the scalar's underlying value buffer.
        turbo::Result<std::shared_ptr<Tensor> > FixedShapeTensorType::MakeTensor(
            const std::shared_ptr<ExtensionScalar> &scalar) {
            const auto &ext_scalar = turbo::checked_cast<const ExtensionScalar &>(*scalar);
            const auto &ext_type =
                    turbo::checked_cast<const FixedShapeTensorType &>(*scalar->type);
            // Tensors need fixed-width elements; variable-width values cannot be
            // addressed through strides.
            if (!is_fixed_width(*ext_type.get_value_type())) {
                return turbo::failed_precondition_error("Cannot convert non-fixed-width values to Tensor.");
            }
            const auto &array =
                    turbo::checked_cast<const FixedSizeListScalar *>(ext_scalar.value.get())->value;
            if (array->null_count() > 0) {
                return turbo::invalid_argument_error("Cannot convert data with nulls to Tensor.");
            }
            const auto &value_type =
                    turbo::checked_cast<const FixedWidthType &>(*ext_type.get_value_type());
            const auto byte_width = value_type.byte_width();

            // Treat an absent permutation as the identity so the permutes below
            // can run unconditionally.
            std::vector<int64_t> permutation = ext_type.permutation();
            if (permutation.empty()) {
                permutation.resize(ext_type.ndim());
                std::iota(permutation.begin(), permutation.end(), 0);
            }

            // Reorder shape (and dim_names, when present) into the permuted order
            // expected by ComputeStrides and the resulting Tensor.
            std::vector<int64_t> shape = ext_type.shape();
            turbo::permute<int64_t>(permutation, &shape);

            std::vector<std::string> dim_names = ext_type.dim_names();
            if (!dim_names.empty()) {
                turbo::permute<std::string>(permutation, &dim_names);
            }

            std::vector<int64_t> strides;
            TURBO_RETURN_NOT_OK(ComputeStrides(value_type, shape, permutation, &strides));
            // The cell's values live in the list array's value buffer (buffers[1]),
            // starting at the array's element offset.
            const auto start_position = array->offset() * byte_width;
            const auto size = std::accumulate(shape.begin(), shape.end(), static_cast<int64_t>(1),
                                              std::multiplies<>());
            const auto buffer =
                    SliceBuffer(array->data()->buffers[1], start_position, size * byte_width);

            return Tensor::create(ext_type.get_value_type(), buffer, shape, strides, dim_names);
        }

        // Creates a FixedShapeTensorArray from an n-dimensional tensor: the
        // tensor's major-most dimension becomes the array length and the remaining
        // dimensions describe each cell. The conversion is zero-copy — the
        // tensor's data buffer becomes the fixed-size-list value buffer.
        turbo::Result<std::shared_ptr<FixedShapeTensorArray> > FixedShapeTensorArray::FromTensor(
            const std::shared_ptr<Tensor> &tensor) {
            // Order dimensions by decreasing stride to recover the physical layout.
            // Dimension 0 must be major-most, otherwise rows are not contiguous
            // and zero-copy conversion is impossible.
            auto permutation = turbo::arg_sort(tensor->strides(), std::greater<>());
            if (permutation[0] != 0) {
                return turbo::invalid_argument_error(
                    "Only first-major tensors can be zero-copy converted to arrays");
            }
            // Drop dimension 0: it becomes the array length, not part of a cell.
            permutation.erase(permutation.begin());

            std::vector<int64_t> cell_shape;
            cell_shape.reserve(permutation.size());
            for (auto i: permutation) {
                cell_shape.emplace_back(tensor->shape()[i]);
            }

            std::vector<std::string> dim_names;
            if (!tensor->dim_names().empty()) {
                dim_names.reserve(permutation.size());
                for (auto i: permutation) {
                    dim_names.emplace_back(tensor->dim_names()[i]);
                }
            }

            // Shift the remaining indices from [1, ndim) down to [0, ndim - 1) so
            // they form a valid per-cell permutation.
            for (int64_t &i: permutation) {
                --i;
            }

            auto ext_type = turbo::checked_pointer_cast<ExtensionType>(
                fixed_shape_tensor(tensor->type(), cell_shape, permutation, dim_names));

            // Wrap the tensor's buffer in a primitive array of the matching type.
            std::shared_ptr<Array> value_array;
            switch (tensor->type_id()) {
                case Type::UINT8: {
                    value_array = std::make_shared<UInt8Array>(tensor->size(), tensor->data());
                    break;
                }
                case Type::INT8: {
                    value_array = std::make_shared<Int8Array>(tensor->size(), tensor->data());
                    break;
                }
                case Type::UINT16: {
                    value_array = std::make_shared<UInt16Array>(tensor->size(), tensor->data());
                    break;
                }
                case Type::INT16: {
                    value_array = std::make_shared<Int16Array>(tensor->size(), tensor->data());
                    break;
                }
                case Type::UINT32: {
                    value_array = std::make_shared<UInt32Array>(tensor->size(), tensor->data());
                    break;
                }
                case Type::INT32: {
                    value_array = std::make_shared<Int32Array>(tensor->size(), tensor->data());
                    break;
                }
                case Type::UINT64: {
                    // Bug fix: this case previously built an Int64Array, silently
                    // reinterpreting unsigned 64-bit values as signed.
                    value_array = std::make_shared<UInt64Array>(tensor->size(), tensor->data());
                    break;
                }
                case Type::INT64: {
                    value_array = std::make_shared<Int64Array>(tensor->size(), tensor->data());
                    break;
                }
                case Type::FP16: {
                    value_array = std::make_shared<Fp16Array>(tensor->size(), tensor->data());
                    break;
                }
                case Type::FP32: {
                    value_array = std::make_shared<Fp32Array>(tensor->size(), tensor->data());
                    break;
                }
                case Type::FP64: {
                    value_array = std::make_shared<Fp64Array>(tensor->size(), tensor->data());
                    break;
                }
                default: {
                    return turbo::unimplemented_error("Unsupported tensor type: ",
                                                      tensor->type()->to_string());
                }
            }
            // Each list cell holds one row's worth of the flattened values.
            auto cell_size = static_cast<int32_t>(tensor->size() / tensor->shape()[0]);
            TURBO_MOVE_OR_RAISE(std::shared_ptr<Array> arr,
                                FixedSizeListArray::from_arrays(value_array, cell_size));
            std::shared_ptr<Array> ext_arr = ExtensionType::WrapArray(ext_type, arr);
            return std::static_pointer_cast<FixedShapeTensorArray>(ext_arr);
        }

        // Converts the whole array of n-dimensional tensors into a single
        // (n+1)-dimensional Tensor that views the array's value buffer.
        const turbo::Result<std::shared_ptr<Tensor> > FixedShapeTensorArray::ToTensor() const {
            // To convert an array of n dimensional tensors to a n+1 dimensional tensor we
            // interpret the array's length as the first dimension of the new tensor.

            const auto &ext_type =
                    turbo::checked_cast<const FixedShapeTensorType &>(*this->type());
            const auto &value_type = ext_type.get_value_type();
            // Only fixed-width elements can be addressed through strides.
            TURBO_RETURN_IF(
                !is_fixed_width(*value_type),
                turbo::failed_precondition_error(value_type->to_string(), " is not valid data type for a tensor"));

            // ext_type->permutation() gives us permutation for a single row with values in
            // range [0, ndim). Here we want to create a ndim + 1 dimensional tensor from the
            // entire array and we assume the first dimension will always have the greatest
            // stride, so it will get permutation index 0 and remaining values from
            // ext_type->permutation() need to be shifted to fill the [1, ndim+1) range.
            // Computed permutation will be used to generate the new tensor's shape, strides
            // and dim_names.
            std::vector<int64_t> permutation = ext_type.permutation();
            if (permutation.empty()) {
                // No stored permutation: identity over all ndim + 1 dimensions.
                permutation.resize(ext_type.ndim() + 1);
                std::iota(permutation.begin(), permutation.end(), 0);
            } else {
                for (auto i = 0; i < static_cast<int64_t>(ext_type.ndim()); i++) {
                    permutation[i] += 1;
                }
                permutation.insert(permutation.begin(), 1, 0);
            }

            // Prefix an empty name for the new leading dimension, then reorder.
            std::vector<std::string> dim_names = ext_type.dim_names();
            if (!dim_names.empty()) {
                dim_names.insert(dim_names.begin(), 1, "");
                turbo::permute<std::string>(permutation, &dim_names);
            }

            // Prefix the array length as the new leading dimension, then reorder.
            // cell_size (elements per row) is taken before the insert.
            std::vector<int64_t> shape = ext_type.shape();
            auto cell_size = std::accumulate(shape.begin(), shape.end(), static_cast<int64_t>(1),
                                             std::multiplies<>());
            shape.insert(shape.begin(), 1, this->length());
            turbo::permute<int64_t>(permutation, &shape);

            std::vector<int64_t> tensor_strides;
            const auto *fw_value_type = turbo::checked_cast<FixedWidthType *>(value_type.get());
            TURBO_RETURN_NOT_OK(
                ComputeStrides(*fw_value_type, shape, permutation, &tensor_strides));

            // The values live in the fixed-size list child's data buffer; skip any
            // leading rows hidden by this array's offset.
            const auto &raw_buffer = this->storage()->data()->child_data[0]->buffers[1];
            TURBO_MOVE_OR_RAISE(
                const auto buffer,
                SliceBufferSafe(raw_buffer, this->offset() * cell_size * value_type->byte_width()));

            return Tensor::create(value_type, buffer, shape, tensor_strides, dim_names);
        }

        // Validates the parameters and builds the extension type. `permutation`
        // and `dim_names`, when provided, must carry one entry per dimension.
        //
        // The per-cell element count becomes the length of the underlying
        // FixedSizeList, which is a 32-bit int. Previously the product was
        // computed with an unchecked std::accumulate and silently truncated by
        // static_cast<int32_t>; large or negative shapes now return an error
        // instead of producing a corrupt type.
        turbo::Result<std::shared_ptr<DataType> > FixedShapeTensorType::create(
            const std::shared_ptr<DataType> &value_type, const std::vector<int64_t> &shape,
            const std::vector<int64_t> &permutation, const std::vector<std::string> &dim_names) {
            const size_t ndim = shape.size();
            if (!permutation.empty() && ndim != permutation.size()) {
                return turbo::invalid_argument_error("permutation size must match shape size. Expected: ", ndim,
                                                     " Got: ", permutation.size());
            }
            if (!dim_names.empty() && ndim != dim_names.size()) {
                return turbo::invalid_argument_error("dim_names size must match shape size. Expected: ", ndim,
                                                     " Got: ", dim_names.size());
            }
            if (!permutation.empty()) {
                TURBO_RETURN_NOT_OK(internal::is_permutation_valid(permutation));
            }

            // Overflow-checked product of the dimensions (empty shape -> 1).
            int64_t size = 1;
            for (const int64_t dim: shape) {
                if (dim < 0) {
                    return turbo::invalid_argument_error(
                        "shape must not contain negative dimensions");
                }
                if (internal::MultiplyWithOverflow(size, dim, &size)) {
                    return turbo::invalid_argument_error(
                        "Tensor size would not fit in a 64-bit integer");
                }
            }
            if (size > std::numeric_limits<int32_t>::max()) {
                return turbo::invalid_argument_error(
                    "Tensor size ", size, " exceeds the maximum fixed size list length");
            }
            return std::make_shared<FixedShapeTensorType>(value_type, static_cast<int32_t>(size),
                                                          shape, permutation, dim_names);
        }

        const std::vector<int64_t> &FixedShapeTensorType::strides() {
            if (strides_.empty()) {
                auto value_type = turbo::checked_cast<FixedWidthType *>(this->value_type_.get());
                std::vector<int64_t> tensor_strides;
                KCHECK_OK(
                    ComputeStrides(*value_type, this->shape(), this->permutation(), &tensor_strides));
                strides_ = tensor_strides;
            }
            return strides_;
        }

        // Convenience factory that aborts (KCHECK) on invalid arguments instead of
        // returning a status; use FixedShapeTensorType::create when the caller
        // needs to handle errors.
        std::shared_ptr<DataType> fixed_shape_tensor(const std::shared_ptr<DataType> &value_type,
                                                     const std::vector<int64_t> &shape,
                                                     const std::vector<int64_t> &permutation,
                                                     const std::vector<std::string> &dim_names) {
            auto result = FixedShapeTensorType::create(value_type, shape, permutation, dim_names);
            KCHECK_OK(result.status());
            return result.move_value_unsafe();
        }
    } // namespace extension
} // namespace nebula
namespace nebula::internal {
    // Checks that `permutation` is a true permutation of [0, size): every index
    // in range and no index repeated.
    turbo::Status is_permutation_valid(const std::vector<int64_t> &permutation) {
        const auto ndim = static_cast<int64_t>(permutation.size());
        std::vector<uint8_t> seen(permutation.size(), 0);

        for (const int64_t index: permutation) {
            const bool out_of_range = index < 0 || index >= ndim;
            if (out_of_range || seen[index] != 0) {
                return turbo::invalid_argument_error(
                    "Permutation indices for ", ndim,
                    " dimensional tensors must be unique and within [0, ", ndim - 1,
                    "] range. Got: ", ::nebula::internal::PrintVector{permutation, ","});
            }
            seen[index] = 1;
        }
        return turbo::OkStatus();
    }
} // namespace nebula::internal
