#pragma once

#include <model/include/InputPort.h>
#include <model/include/Model.h>
#include <model/include/Node.h>

#include <nodes/include/NeuralNetworkPredictorNode.h>

#include <predictors/neural/include/ActivationLayer.h>
#include <predictors/neural/include/BinaryConvolutionalLayer.h>
#include <predictors/neural/include/ConvolutionalLayer.h>
#include <predictors/neural/include/PoolingLayer.h>
#include <predictors/neural/include/ScalingLayer.h>

#include <memory>
#include <string>
#include <vector>

namespace ell
{
struct NameValue
{
    std::string name;
    std::string value;
};
} // namespace ell

#pragma region implementation

namespace ell
{
static std::string PaddingSchemeToString(ell::predictors::neural::PaddingScheme scheme)
{
    switch (scheme)
    {
    case ell::predictors::neural::PaddingScheme::zeros:
        return "zeros";
    case ell::predictors::neural::PaddingScheme::minusOnes:
        return "minusOnes";
    case ell::predictors::neural::PaddingScheme::alternatingZeroAndOnes:
        return "alternatingZeroAndOnes";
    case ell::predictors::neural::PaddingScheme::randomZeroAndOnes:
        return "randomZeroAndOnes";
    case ell::predictors::neural::PaddingScheme::min:
        return "min";
    case ell::predictors::neural::PaddingScheme::max:
        return "max";
    }
    return "";
}

static std::string ConvolutionMethodToString(ell::predictors::neural::ConvolutionMethod method)
{
    switch (method)
    {
        /// <summary> Normal method of doing convolution via reshaping input into columns and performing a gemm operation. </summary>
    case ell::predictors::neural::ConvolutionMethod::automatic:
        return "automatic";
    case ell::predictors::neural::ConvolutionMethod::diagonal:
        return "diagonal";
    case ell::predictors::neural::ConvolutionMethod::simple:
        return "simple";
    case ell::predictors::neural::ConvolutionMethod::winograd:
        return "winograd";
    case ell::predictors::neural::ConvolutionMethod::unrolled:
        return "unrolled";
    }
    return "";
}
static std::string BinaryConvolutionMethodToString(ell::predictors::neural::BinaryConvolutionMethod method)
{
    switch (method)
    {
        /// <summary> Normal method of doing convolution via reshaping input into columns and performing a gemm operation. </summary>
    case ell::predictors::neural::BinaryConvolutionMethod::gemm:
        return "gemm";
    case ell::predictors::neural::BinaryConvolutionMethod::bitwise:
        return "bitwise";
    };
    return "";
}
static std::string BinaryWeightsScaleToString(ell::predictors::neural::BinaryWeightsScale method)
{
    switch (method)
    {
        /// <summary> Normal method of doing convolution via reshaping input into columns and performing a gemm operation. </summary>
    case ell::predictors::neural::BinaryWeightsScale::none:
        return "none";
    case ell::predictors::neural::BinaryWeightsScale::mean:
        return "mean";
    };
    return "";
}

template <typename ElementType>
std::vector<NameValue> InspectActivationLayerParameters(const ell::predictors::neural::ActivationLayer<ElementType>* layer)
{
    std::vector<NameValue> result;
    auto impl = layer->GetActivationFunction().GetImpl();
    if (impl)
    {
        result.push_back(NameValue{ "activation", impl->GetRuntimeTypeName() });
    }
    return result;
}

template <typename ElementType>
std::vector<NameValue> InspectBinaryConvolutionalLayerParameters(const ell::predictors::neural::BinaryConvolutionalLayer<ElementType>* layer)
{
    std::vector<NameValue> result;
    auto params = layer->GetConvolutionalParameters();
    result.push_back(NameValue{ "stride", std::to_string(params.stride) });
    result.push_back(NameValue{ "method", BinaryConvolutionMethodToString(params.method) });
    result.push_back(NameValue{ "receptiveField", std::to_string(params.receptiveField) });
    result.push_back(NameValue{ "weightsScale", BinaryWeightsScaleToString(params.weightsScale) });
    return result;
}

template <typename ElementType>
std::vector<NameValue> InspectConvolutionalLayerParameters(const ell::predictors::neural::ConvolutionalLayer<ElementType>* layer)
{
    std::vector<NameValue> result;
    auto params = layer->GetConvolutionalParameters();
    auto weights = layer->GetWeights();
    result.push_back(NameValue{ "stride", std::to_string(params.stride) });
    result.push_back(NameValue{ "method", ConvolutionMethodToString(params.method) });
    result.push_back(NameValue{ "receptiveField", std::to_string(params.receptiveField) });
    result.push_back(NameValue{ "isSeparable", std::to_string(weights.NumChannels() == 1) });
    return result;
}

template <typename ElementType, template <typename> class PoolingFunctionType>
std::vector<NameValue> InspectPoolingLayerParameters(const ell::predictors::neural::PoolingLayer<ElementType, PoolingFunctionType>* layer)
{
    std::vector<NameValue> result;
    auto params = layer->GetPoolingParameters();
    result.push_back(NameValue{ "stride", std::to_string(params.stride) });
    result.push_back(NameValue{ "size", std::to_string(params.poolingSize) });
    return result;
}

template <typename ElementType>
std::vector<NameValue> InspectLayerParameters(const ell::predictors::neural::Layer<ElementType>& layer)
{
    std::vector<NameValue> result;
    auto params = layer.GetLayerParameters();

    auto inputpadding = params.inputPaddingParameters;
    auto outputpadding = params.outputPaddingParameters;
    if (inputpadding.paddingSize == 0)
    {
        result.push_back(NameValue{ "inputPadding", "none,0" });
    }
    else
    {
        result.push_back(NameValue{ "inputPadding", PaddingSchemeToString(inputpadding.paddingScheme) + "," + std::to_string(inputpadding.paddingSize) });
    }

    if (outputpadding.paddingSize == 0)
    {
        result.push_back(NameValue{ "outputPadding", "none,0" });
    }
    else
    {
        result.push_back(NameValue{ "outputPadding", PaddingSchemeToString(outputpadding.paddingScheme) + "," + std::to_string(outputpadding.paddingSize) });
    }

    const ell::predictors::neural::ActivationLayer<ElementType>* act = dynamic_cast<const ell::predictors::neural::ActivationLayer<ElementType>*>(&layer);
    if (act != nullptr)
    {
        std::vector<NameValue> more = InspectActivationLayerParameters<ElementType>(act);
        result.insert(result.end(), more.begin(), more.end());
    }

    const ell::predictors::neural::BinaryConvolutionalLayer<ElementType>* bcl = dynamic_cast<const ell::predictors::neural::BinaryConvolutionalLayer<ElementType>*>(&layer);
    if (bcl != nullptr)
    {
        std::vector<NameValue> more = InspectBinaryConvolutionalLayerParameters<ElementType>(bcl);
        result.insert(result.end(), more.begin(), more.end());
    }

    const ell::predictors::neural::ConvolutionalLayer<ElementType>* conv = dynamic_cast<const ell::predictors::neural::ConvolutionalLayer<ElementType>*>(&layer);
    if (conv != nullptr)
    {
        std::vector<NameValue> more = InspectConvolutionalLayerParameters<ElementType>(conv);
        result.insert(result.end(), more.begin(), more.end());
    }

    const ell::predictors::neural::PoolingLayer<ElementType, ell::predictors::neural::MaxPoolingFunction>* maxpooling = dynamic_cast<const ell::predictors::neural::PoolingLayer<ElementType, ell::predictors::neural::MaxPoolingFunction>*>(&layer);
    if (maxpooling != nullptr)
    {
        result.push_back(NameValue{ "function", "maxpooling" });
        std::vector<NameValue> more = InspectPoolingLayerParameters<ElementType, ell::predictors::neural::MaxPoolingFunction>(maxpooling);
        result.insert(result.end(), more.begin(), more.end());
    }

    const ell::predictors::neural::PoolingLayer<ElementType, ell::predictors::neural::MeanPoolingFunction>* meanpooling = dynamic_cast<const ell::predictors::neural::PoolingLayer<ElementType, ell::predictors::neural::MeanPoolingFunction>*>(&layer);
    if (meanpooling != nullptr)
    {
        result.push_back(NameValue{ "function", "meanpooling" });
        std::vector<NameValue> more = InspectPoolingLayerParameters<ElementType, ell::predictors::neural::MeanPoolingFunction>(meanpooling);
        result.insert(result.end(), more.begin(), more.end());
    }

    return result;
}
} // namespace ell

#pragma endregion implementation
