/*
 *         (C) COPYRIGHT NBI Limited
 *              ALL RIGHTS RESERVED
 *
 * File        : fastflow/graph/node_arg.cc
 * Authors     : dzhang
 * Create Time : 2021/09/04 18:25:32
 * Description :
 *
 */

#include "fastflow/graph/node_arg.h"

#include "fastflow/core/common.h"
#include "onnx/defs/data_type_utils.h"
#include "onnx/onnx-ml.pb.h"

using namespace ONNX_NAMESPACE;
using namespace ONNX_NAMESPACE::Utils;

namespace fastflow {

// Executes the wrapped statements while preserving the graph's
// "proto sync needed" flag: the flag is read before the statements run and
// written back afterwards, so whatever the statements do cannot change it.
// NOTE(review): this macro is not referenced anywhere in this translation
// unit, and it expands calls to GraphProtoSyncNeeded(), which must be in
// scope at the expansion site (presumably a Graph member) — confirm intended
// usage or remove.
#define NO_CHANGE_ON_SYNC_FLAG(...)                  \
  do {                                               \
    const bool sync_needed = GraphProtoSyncNeeded(); \
    { __VA_ARGS__; }                                 \
    GraphProtoSyncNeeded(sync_needed);               \
  } while (0)

// Merges the shape information of `source` into `target` via ONNX's
// mergeInShapeInfo, converting any ONNX InferenceError into a failed Status
// (tagged with `output_name`) instead of letting the exception escape.
static Status MergeShapeInfo(const std::string& output_name,
                             const TypeProto_Tensor& source, TypeProto_Tensor& target) {
  Status result = Status::OK();
  try {
    ONNX_NAMESPACE::mergeInShapeInfo(source, target);
  } catch (const ONNX_NAMESPACE::InferenceError& ex) {
    result = FASTFLOW_MAKE_STATUS(FASTFLOW, FAIL, "Output:", output_name, " ", ex.what());
  }
  return result;
}


// Builds a NodeArg with the given name and optional type proto.
// An empty name denotes a non-existent (optional/omitted) argument; when a
// type proto is supplied it is copied into the backing proto and the
// canonical type string is cached, otherwise the cached type stays null.
NodeArg::NodeArg(const std::string& name, const ONNX_NAMESPACE::TypeProto* p_node_arg_type) {
    node_arg_info_.set_name(name);
    // An empty name marks the arg as not existing.
    exists_ = !name.empty();
    if (p_node_arg_type == nullptr) {
        type_ = nullptr;
    } else {
        *node_arg_info_.mutable_type() = *p_node_arg_type;
        // Cache the interned type string derived from the stored proto.
        type_ = ONNX_NAMESPACE::Utils::DataTypeUtils::ToType(node_arg_info_.type());
    }
}

// Returns the argument's name as stored in the backing proto.
const std::string& NodeArg::Name() const noexcept { return node_arg_info_.name(); }

// Returns the cached canonical type string; nullptr when the arg is untyped.
ONNX_NAMESPACE::DataType NodeArg::Type() const noexcept { return type_; }

// Returns a pointer to the stored TypeProto, or nullptr if no type was set.
const TypeProto* NodeArg::TypeAsProto() const noexcept {
    return node_arg_info_.has_type() ? &node_arg_info_.type() : nullptr;
}

// Returns the shape proto for tensor or sparse-tensor typed args, or nullptr
// when the arg is untyped, the type carries no shape, or the type kind
// (sequence/map/opaque/unset) has no notion of shape.
const TensorShapeProto* NodeArg::Shape() const {
    const TypeProto* proto = TypeAsProto();
    if (proto == nullptr) {
        return nullptr;
    }

    switch (proto->value_case()) {
        case TypeProto::kTensorType:
            return proto->tensor_type().has_shape() ? &proto->tensor_type().shape()
                                                    : nullptr;
        case TypeProto::kSparseTensorType:
            return proto->sparse_tensor_type().has_shape()
                       ? &proto->sparse_tensor_type().shape()
                       : nullptr;
        default:
            // kSequenceType, kMapType, kOpaqueType, VALUE_NOT_SET: no shape.
            return nullptr;
    }
}

// Copies `shape` into the stored type proto. Silently does nothing when the
// arg has no type yet, or when the type kind (sequence/map/opaque/unset)
// does not carry a shape.
void NodeArg::SetShape(const TensorShapeProto& shape) {
    if (!node_arg_info_.has_type()) {
        return;
    }

    // has_type() guarantees value_case() is already set, so taking the
    // mutable pointer here does not alter which oneof member is active.
    auto* mutable_type = node_arg_info_.mutable_type();
    switch (mutable_type->value_case()) {
        case TypeProto::kTensorType:
            *mutable_type->mutable_tensor_type()->mutable_shape() = shape;
            break;
        case TypeProto::kSparseTensorType:
            *mutable_type->mutable_sparse_tensor_type()->mutable_shape() = shape;
            break;
        default:
            // kSequenceType, kMapType, kOpaqueType, VALUE_NOT_SET: no-op.
            break;
    }
}

// Merges `input_type` into this arg's stored type.
// - If the arg is untyped, the incoming type is adopted wholesale.
// - Otherwise the oneof kind and (for tensor/sparse-tensor) the element type
//   must match, or a FAIL status is returned.
// - For dense tensors, incoming shape info is merged into the existing shape
//   (or adopted when none is present). For sparse tensors, only adoption is
//   implemented; merging is a known TODO below.
// Returns Status::OK() on success or when the type kind carries nothing to
// merge (sequence/map/opaque/unset).
Status NodeArg::UpdateTypeAndShape(const ONNX_NAMESPACE::TypeProto& input_type) {
    if (!node_arg_info_.has_type()) {
        // No existing type: take the incoming one and refresh the cached
        // canonical type string.
        *node_arg_info_.mutable_type() = input_type;
        type_ = DataTypeUtils::ToType(node_arg_info_.type());
        return Status::OK();
    }

    auto& current_type = *node_arg_info_.mutable_type();
    const auto current_type_case = current_type.value_case();
    const auto input_type_case = input_type.value_case();

    // The oneof kind (tensor vs sparse tensor vs ...) must agree.
    if (current_type_case != input_type_case)
        return FASTFLOW_MAKE_STATUS(FASTFLOW, FAIL, "Type mismatch. Current=", current_type_case,
                                       " Input=", input_type_case);

    switch (input_type_case) {
        case TypeProto::kTensorType: {
            const auto& input_tensor_type = input_type.tensor_type();
            const auto& input_tensor_elem_type = input_tensor_type.elem_type();
            const auto& current_tensor_elem_type = current_type.tensor_type().elem_type();

            // Element types must match exactly; merging never coerces.
            if (input_tensor_elem_type != current_tensor_elem_type)
                return FASTFLOW_MAKE_STATUS(FASTFLOW, FAIL, "Tensor element type mismatch. ",
                                               static_cast<TensorProto_DataType>(input_tensor_elem_type),
                                               " != ", static_cast<TensorProto_DataType>(current_tensor_elem_type));

            if (input_tensor_type.has_shape()) {
                auto& current_tensor_type = *current_type.mutable_tensor_type();
                if (current_tensor_type.has_shape()) {
                    // Both sides have a shape: merge dimension-by-dimension
                    // via ONNX, surfacing conflicts as a failed Status.
                    FASTFLOW_RETURN_IF_ERROR(MergeShapeInfo(Name(), input_tensor_type, current_tensor_type));
                } else {
                    // Only the input has a shape: adopt it.
                    current_tensor_type = input_tensor_type;
                }
            }

            break;
        }
        case TypeProto::kSparseTensorType: {
            const auto& input_tensor_type = input_type.sparse_tensor_type();
            const auto input_tensor_elem_type = input_tensor_type.elem_type();
            const auto current_tensor_elem_type = current_type.sparse_tensor_type().elem_type();
            if (input_tensor_elem_type != current_tensor_elem_type) {
                return FASTFLOW_MAKE_STATUS(FASTFLOW, FAIL, "SparseTensor element type mismatch. ",
                                               static_cast<TensorProto_DataType>(input_tensor_elem_type),
                                               " != ", static_cast<TensorProto_DataType>(current_tensor_elem_type));
            }
            if (input_tensor_type.has_shape()) {
                auto& current_tensor_type = *current_type.mutable_sparse_tensor_type();
                if (current_tensor_type.has_shape()) {
                    // Deliberately left unmerged: when both sides have a
                    // shape the current one is kept as-is.
                    // TODO: Check if we need to merge shape here
                    // if so we'd need to provide merging routine ONNX
                    // mergeInShapeInfo(input_tensor_type, current_tensor_type);
                } else {
                    current_tensor_type = input_tensor_type;
                }
            }
        } break;
        case TypeProto::kSequenceType:
        case TypeProto::kMapType:
        case TypeProto::kOpaqueType:
        case TypeProto::VALUE_NOT_SET:
            // Nothing to merge for these kinds; treated as success.
            break;
    }

    return Status::OK();
}

// Merges type/shape information from another NodeArg into this one.
// A typeless source contributes nothing and yields OK immediately.
Status NodeArg::UpdateTypeAndShape(const NodeArg& node_arg) {
    if (!node_arg.node_arg_info_.has_type()) {
        return Status::OK();
    }
    return UpdateTypeAndShape(node_arg.node_arg_info_.type());
}

// Sets the arg's type from a canonical type string, updating both the cached
// type and the backing proto. A null type is ignored.
void NodeArg::SetType(ONNX_NAMESPACE::DataType p_type) {
    if (p_type == nullptr) {
        return;
    }

    type_ = p_type;
    *node_arg_info_.mutable_type() = DataTypeUtils::ToTypeProto(p_type);
}

// Sets the arg's type from a full TypeProto: the canonical type string is
// re-derived and cached, and the proto is copied into the backing message.
void NodeArg::SetType(const TypeProto& type_proto) {
    type_ = DataTypeUtils::ToType(type_proto);
    *node_arg_info_.mutable_type() = type_proto;
}

// True when this arg denotes a real value; false for empty-name placeholders
// (see the constructor, which derives this from the name).
bool NodeArg::Exists() const noexcept { return exists_; }

}  // namespace fastflow
