////////////////////////////////////////////////////////////////////////////////////////////////////
//
//  Project:  Embedded Learning Library (ELL)
//  File:     BinaryFunctionNode.h (nodes)
//  Authors:  Lisa Ong
//
////////////////////////////////////////////////////////////////////////////////////////////////////

#pragma once

#include <model/include/CompilableNode.h>
#include <model/include/CompilableNodeUtilities.h>
#include <model/include/IRMapCompiler.h>
#include <model/include/MapCompiler.h>
#include <model/include/Model.h>
#include <model/include/ModelTransformer.h>
#include <model/include/Node.h>

#include <emitters/include/EmitterTypes.h>
#include <emitters/include/LLVMUtilities.h>

#include <utilities/include/ArchiveVersion.h>
#include <utilities/include/Exception.h>
#include <utilities/include/IArchivable.h>

#include <string>
#include <vector>

namespace ell
{
namespace nodes
{
    /// <summary> A node that performs a coordinatewise binary operation on its inputs ('binary' in the sense that there are 2 inputs). </summary>
    ///
    /// The input and output memory layouts may include padding; the function is
    /// applied only over the active (unpadded) region of the inputs.
    template <typename ValueType, typename FunctionType>
    class BinaryFunctionNode : public model::CompilableNode
    {
    public:
        /// @name Input and Output Ports
        /// @{
        const model::InputPort<ValueType>& input1 = _input1;
        const model::InputPort<ValueType>& input2 = _input2;
        const model::OutputPort<ValueType>& output = _output;
        /// @}

        /// <summary> Default Constructor </summary>
        BinaryFunctionNode();

        /// <summary> Constructor. Both inputs and the output use input1's memory layout. </summary>
        ///
        /// <param name="input1"> The left-hand input of the function. </param>
        /// <param name="input2"> The right-hand input of the function. </param>
        /// <param name="function"> The function to apply coordinate-wise. </param>
        /// <param name="padding"> The padding value. </param>
        BinaryFunctionNode(const model::OutputPort<ValueType>& input1, const model::OutputPort<ValueType>& input2, FunctionType function, ValueType padding = 0);

        /// <summary> Constructor. </summary>
        ///
        /// <param name="input1"> The left-hand input of the function. </param>
        /// <param name="input2"> The right-hand input of the function. </param>
        /// <param name="inputLayout"> The layout for both inputs and the output. </param>
        /// <param name="function"> The function to apply coordinate-wise. </param>
        /// <param name="padding"> The padding value. </param>
        BinaryFunctionNode(const model::OutputPort<ValueType>& input1, const model::OutputPort<ValueType>& input2, const model::PortMemoryLayout& inputLayout, FunctionType function, ValueType padding = 0);

        /// <summary> Constructor. </summary>
        ///
        /// <param name="input1"> The left-hand input of the function. </param>
        /// <param name="input2"> The right-hand input of the function. </param>
        /// <param name="inputLayout"> The layout for both inputs. </param>
        /// <param name="outputLayout"> The output layout. </param>
        /// <param name="function"> The function to apply coordinate-wise. </param>
        /// <param name="padding"> The padding value. </param>
        BinaryFunctionNode(const model::OutputPort<ValueType>& input1, const model::OutputPort<ValueType>& input2, const model::PortMemoryLayout& inputLayout, const model::PortMemoryLayout& outputLayout, FunctionType function, ValueType padding = 0);

        /// <summary> Gets information about the input memory layout </summary>
        const model::PortMemoryLayout& GetInputMemoryLayout() const { return _inputLayout; }

        /// <summary> Gets information about the output memory layout </summary>
        model::PortMemoryLayout GetOutputMemoryLayout() const { return _output.GetMemoryLayout(); }

        /// <summary> Gets the name of this type (for serialization). </summary>
        ///
        /// <returns> The name of this type. </returns>
        static std::string GetTypeName() { return utilities::GetCompositeTypeName<ValueType, FunctionType>("BinaryFunctionNode"); }

        /// <summary> Gets the name of this type (for serialization). </summary>
        ///
        /// <returns> The name of this type. </returns>
        std::string GetRuntimeTypeName() const override { return GetTypeName(); }

        /// <summary> Returns true if the node can accept input with this memory layout order, else false </summary>
        ///
        /// <param name="order"> The memory layout order for all the input ports </param>
        /// <returns> If the node can accept the input memory layout order, true, else false </returns>
        bool CanAcceptInputLayout(const utilities::DimensionOrder& order) const override
        {
            return GetInputMemoryLayout().GetLogicalDimensionOrder() == order;
        }

    protected:
        // Interpreted (non-compiled) evaluation of the node.
        void Compute() const override;
        // Emits LLVM IR equivalent of Compute() via nested loops.
        void Compile(model::IRMapCompiler& compiler, emitters::IRFunctionEmitter& function) override;
        ell::utilities::ArchiveVersion GetArchiveVersion() const override;
        void WriteToArchive(utilities::Archiver& archiver) const override;
        void ReadFromArchive(utilities::Unarchiver& archiver) override;
        bool HasState() const override { return true; } // stored state: paddingValue

    private:
        void Copy(model::ModelTransformer& transformer) const override;

        // Recursive helper for Compute(): handles one dimension of the nested-loop
        // traversal, applying the function at the innermost dimension.
        void ComputeDimensionLoop(size_t dimension,
                                  std::vector<ValueType>& output,
                                  size_t prevInputDimensionOffset,
                                  size_t prevOutputDimensionOffset) const;

        // Recursive helper for Compile(): emits one loop of the nested-loop IR.
        // The prev*DimensionOffset values are nullptr at the outermost dimension.
        void EmitComputeDimensionLoop(model::IRMapCompiler& compiler,
                                      emitters::IRFunctionEmitter& function,
                                      size_t dimension,
                                      emitters::LLVMValue input1,
                                      emitters::LLVMValue input2,
                                      emitters::LLVMValue output,
                                      emitters::LLVMValue prevInputDimensionOffset,
                                      emitters::LLVMValue prevOutputDimensionOffset) const;

        // Inputs
        model::InputPort<ValueType> _input1;
        model::InputPort<ValueType> _input2;
        model::PortMemoryLayout _inputLayout;

        // Output
        model::OutputPort<ValueType> _output;

        // Function to apply coordinate-wise
        FunctionType _function;

        // Value used to initialize the padded region of the output
        ValueType _paddingValue;
    };
} // namespace nodes
} // namespace ell

#pragma region implementation

namespace ell
{
namespace nodes
{
    // Default constructor: creates an unconnected node with empty inputs, a
    // zero-size output, and zero padding. Used primarily by deserialization.
    template <typename ValueType, typename FunctionType>
    BinaryFunctionNode<ValueType, FunctionType>::BinaryFunctionNode() :
        CompilableNode({ &_input1, &_input2 }, { &_output }),
        _input1(this, {}, defaultInput1PortName),
        _input2(this, {}, defaultInput2PortName),
        _output(this, defaultOutputPortName, 0),
        _paddingValue(0)
    {
    }

    // Convenience constructor: uses input1's memory layout for inputs and output.
    template <typename ValueType, typename FunctionType>
    BinaryFunctionNode<ValueType, FunctionType>::BinaryFunctionNode(const model::OutputPort<ValueType>& input1, const model::OutputPort<ValueType>& input2, FunctionType function, ValueType padding) :
        BinaryFunctionNode(input1, input2, input1.GetMemoryLayout(), function, padding)
    {
    }

    // Constructor taking a single layout, used for both inputs and the output.
    //
    // Bug fix: previously this delegated with input1.GetMemoryLayout() for both
    // layouts, silently ignoring the caller-supplied `layout` parameter.
    template <typename ValueType, typename FunctionType>
    BinaryFunctionNode<ValueType, FunctionType>::BinaryFunctionNode(const model::OutputPort<ValueType>& input1, const model::OutputPort<ValueType>& input2, const model::PortMemoryLayout& layout, FunctionType function, ValueType padding) :
        BinaryFunctionNode(input1, input2, layout, layout, function, padding)
    {
    }

    // Primary constructor: wires up both input ports, stores the layouts,
    // function object, and padding value, and validates the configuration.
    //
    // Throws InputException if the two inputs have different sizes, or if the
    // input and output layouts describe different active (unpadded) regions.
    template <typename ValueType, typename FunctionType>
    BinaryFunctionNode<ValueType, FunctionType>::BinaryFunctionNode(const model::OutputPort<ValueType>& input1, const model::OutputPort<ValueType>& input2, const model::PortMemoryLayout& inputLayout, const model::PortMemoryLayout& outputLayout, FunctionType function, ValueType padding) :
        CompilableNode({ &_input1, &_input2 }, { &_output }),
        _input1(this, input1, defaultInput1PortName),
        _input2(this, input2, defaultInput2PortName),
        _inputLayout(inputLayout),
        _output(this, defaultOutputPortName, outputLayout),
        _function(std::move(function)),
        _paddingValue(padding)
    {
        if (input1.Size() != input2.Size())
        {
            throw utilities::InputException(utilities::InputExceptionErrors::invalidArgument, "Input sizes must match");
        }

        if (inputLayout.GetActiveSize() != outputLayout.GetActiveSize())
        {
            // Fixed error message: the comparison is between the input layout and the
            // output layout, not between input 1 and input 2. The casts keep the
            // printf-style "%d" specifiers and the argument types in sync.
            throw utilities::InputException(utilities::InputExceptionErrors::invalidArgument,
                                            ell::utilities::FormatString("Input active area size %d doesn't match output active area size %d on BinaryFunctionNode %s",
                                                                         static_cast<int>(inputLayout.GetActiveSize().NumElements()),
                                                                         static_cast<int>(outputLayout.GetActiveSize().NumElements()),
                                                                         GetId().ToString().c_str()));
        }
    }

    // Interpreted evaluation: allocates the full (padded) output buffer and
    // recursively walks the nested dimension loops to fill the active region.
    template <typename ValueType, typename FunctionType>
    void BinaryFunctionNode<ValueType, FunctionType>::Compute() const
    {
        auto outputLayout = _output.GetMemoryLayout();
        auto outputSize = outputLayout.GetExtent().NumElements();
        // Initialize the buffer (including padding) with the padding value so the
        // interpreted result matches the compiled version, which initializes the
        // output memory with _paddingValue (see Compile). Previously this was
        // zero-filled, which disagreed with compiled output when padding != 0.
        auto output = std::vector<ValueType>(outputSize, _paddingValue);

        const size_t prevInputOffset = 0;
        const size_t prevOutputOffset = 0;
        ComputeDimensionLoop(0, output, prevInputOffset, prevOutputOffset);

        _output.SetOutput(output);
    }

    //
    // Arbitrary-depth nested loops are generated recursively. The ComputeDimensionLoop
    // function emits `numDimensions` nested loops of the form:
    //
    // for(iz = 0; iz < sz; ++iz)
    // {
    //     zOffset = (iz+offset[2]) * stride[2];
    //     for(iy = 0; iy < sy; ++iy)
    //     {
    //         yOffset = zOffset + (iy+offset[1]) * stride[1];
    //         for(ix = 0; ix < sx; ++ix)
    //         {
    //             offset = yOffset + (ix+offset[0]) * stride[0];
    //             x1 = input1[offset];
    //             x2 = input2[offset];
    //             val = f(x1, x2);
    //             output[offset] = val;
    //         }
    //     }
    // }
    //

    // One level of the recursive nested-loop traversal. `dimension` is the loop
    // depth; the prev*DimensionOffset arguments carry the accumulated linear
    // offsets from the enclosing dimensions. At the innermost dimension the
    // function is applied element-wise and written into `output`.
    template <typename ValueType, typename FunctionType>
    void BinaryFunctionNode<ValueType, FunctionType>::ComputeDimensionLoop(size_t dimension,
                                                                           std::vector<ValueType>& output,
                                                                           size_t prevInputDimensionOffset,
                                                                           size_t prevOutputDimensionOffset) const
    {
        auto outputLayout = _output.GetMemoryLayout();
        const int lastDimension = _inputLayout.NumDimensions() - 1;
        auto&& inputExtent = _inputLayout.GetExtent();
        auto&& inputOffsets = _inputLayout.GetOffset();
        auto&& activeSize = _inputLayout.GetActiveSize();
        auto&& outputOffsets = outputLayout.GetOffset();
        auto&& outputExtent = outputLayout.GetExtent();

        const bool isInnermost = static_cast<int>(dimension) == lastDimension;
        for (int i = 0; i < activeSize[dimension]; ++i)
        {
            // Offset within this dimension = (loop index + layout offset), then
            // fold in the enclosing dimensions' offsets scaled by this
            // dimension's extent (except at the outermost level).
            size_t inputIndex = i + inputOffsets[dimension];
            size_t outputIndex = i + outputOffsets[dimension];
            if (dimension != 0)
            {
                inputIndex += prevInputDimensionOffset * inputExtent[dimension];
                outputIndex += prevOutputDimensionOffset * outputExtent[dimension];
            }

            if (isInnermost)
            {
                // Apply the function to the corresponding input elements.
                output[outputIndex] = _function.Compute(_input1[inputIndex], _input2[inputIndex]);
            }
            else
            {
                // Recurse to handle the next (inner) dimension.
                ComputeDimensionLoop(dimension + 1, output, inputIndex, outputIndex);
            }
        }
    }

    // Compiled evaluation: ensures buffers exist for both inputs and the output
    // (the output buffer is pre-filled with the padding value), then emits the
    // nested loops over the active region.
    template <typename ValueType, typename FunctionType>
    void BinaryFunctionNode<ValueType, FunctionType>::Compile(model::IRMapCompiler& compiler,
                                                              emitters::IRFunctionEmitter& function)
    {
        auto in1 = compiler.EnsurePortEmitted(input1);
        auto in2 = compiler.EnsurePortEmitted(input2);
        auto result = compiler.EnsurePortEmitted(output, _paddingValue);

        // Null previous-offset values signal the outermost dimension to the
        // recursive loop emitter.
        emitters::LLVMValue noPrevInputOffset = nullptr;
        emitters::LLVMValue noPrevOutputOffset = nullptr;
        EmitComputeDimensionLoop(compiler, function, 0, in1, in2, result, noPrevInputOffset, noPrevOutputOffset);
    }

    // Emits IR for one level of the nested loop traversal (mirrors the logic of
    // ComputeDimensionLoop above). `dimension` is the current loop depth; the
    // prev*DimensionOffset values are the accumulated offsets from the enclosing
    // loops, and are nullptr exactly at the outermost level.
    template <typename ValueType, typename FunctionType>
    void BinaryFunctionNode<ValueType, FunctionType>::EmitComputeDimensionLoop(model::IRMapCompiler& compiler,
                                                                               emitters::IRFunctionEmitter& function,
                                                                               size_t dimension,
                                                                               emitters::LLVMValue input1,
                                                                               emitters::LLVMValue input2,
                                                                               emitters::LLVMValue output,
                                                                               emitters::LLVMValue prevInputDimensionOffset,
                                                                               emitters::LLVMValue prevOutputDimensionOffset) const
    {
        auto outputLayout = _output.GetMemoryLayout();
        const auto numDimensions = _inputLayout.NumDimensions();
        auto&& inputStride = _inputLayout.GetExtent();
        auto&& inputOffset = _inputLayout.GetOffset();
        auto&& inputSize = _inputLayout.GetActiveSize();
        auto&& outputStride = outputLayout.GetExtent();
        auto&& outputOffset = outputLayout.GetOffset();

        // NOTE: the layout vectors are captured by copy — the lambda may be
        // invoked by the emitter after the local auto&& references would dangle.
        function.For(inputSize[dimension], [dimension, numDimensions, inputOffset, inputStride, outputOffset, outputStride, prevInputDimensionOffset, prevOutputDimensionOffset, input1, input2, output, &compiler, this](emitters::IRFunctionEmitter& function, emitters::LLVMValue loopIndex) {
            // Calculate the offset within this dimension = (loopIndex + offset[dimension])
            emitters::LLVMValue thisInputDimensionInternalOffset = function.Operator(emitters::GetAddForValueType<int>(), loopIndex, function.Literal<int>(inputOffset[dimension]));
            emitters::LLVMValue thisOutputDimensionInternalOffset = function.Operator(emitters::GetAddForValueType<int>(), loopIndex, function.Literal<int>(outputOffset[dimension]));

            // Calculate the total offset from beginning of memory:
            //   * if in the outermost loop, the offset into this dimension
            //   * otherwise, the offset into this dimension plus the previous offset scaled by the previous dimension's stride
            emitters::LLVMValue thisInputDimensionOffset = nullptr;
            emitters::LLVMValue thisOutputDimensionOffset = nullptr;
            if (dimension == 0)
            {
                assert(prevInputDimensionOffset == nullptr);
                assert(prevOutputDimensionOffset == nullptr);
                thisInputDimensionOffset = thisInputDimensionInternalOffset;
                thisOutputDimensionOffset = thisOutputDimensionInternalOffset;
            }
            else
            {
                auto scaledInputDimensionOffset = function.Operator(emitters::GetMultiplyForValueType<int>(), prevInputDimensionOffset, function.Literal<int>(inputStride[dimension]));
                thisInputDimensionOffset = function.Operator(emitters::GetAddForValueType<int>(), scaledInputDimensionOffset, thisInputDimensionInternalOffset);

                auto scaledOutputDimensionOffset = function.Operator(emitters::GetMultiplyForValueType<int>(), prevOutputDimensionOffset, function.Literal<int>(outputStride[dimension]));
                thisOutputDimensionOffset = function.Operator(emitters::GetAddForValueType<int>(), scaledOutputDimensionOffset, thisOutputDimensionInternalOffset);
            }

            if (static_cast<int>(dimension) < numDimensions - 1)
            {
                // Recursive call to emit nested loop
                EmitComputeDimensionLoop(compiler, function, dimension + 1, input1, input2, output, thisInputDimensionOffset, thisOutputDimensionOffset);
            }
            else
            {
                // We're in the innermost loop --- compute the value
                auto value1 = function.ValueAt(input1, thisInputDimensionOffset);
                auto value2 = function.ValueAt(input2, thisInputDimensionOffset);
                auto outputValue = _function.Compile(function, value1, value2);
                function.SetValueAt(output, thisOutputDimensionOffset, outputValue);
            }
        });
    }

    // Clones this node into the transformer's target model, preserving layouts,
    // function, and padding, and maps the old output port to the new one.
    template <typename ValueType, typename FunctionType>
    void BinaryFunctionNode<ValueType, FunctionType>::Copy(model::ModelTransformer& transformer) const
    {
        // Look up the (possibly transformed) versions of our inputs in the new model.
        const auto& newInput1 = transformer.GetCorrespondingInputs(_input1);
        const auto& newInput2 = transformer.GetCorrespondingInputs(_input2);
        auto clone = transformer.AddNode<BinaryFunctionNode<ValueType, FunctionType>>(newInput1, newInput2, _inputLayout, _output.GetMemoryLayout(), _function, _paddingValue);
        transformer.MapNodeOutput(output, clone->output);
    }

    // This node archives port memory layouts, a feature of archive version 8.
    template <typename ValueType, typename FunctionType>
    ell::utilities::ArchiveVersion BinaryFunctionNode<ValueType, FunctionType>::GetArchiveVersion() const
    {
        constexpr auto requiredVersion = ell::utilities::ArchiveVersionNumbers::v8_port_memory_layout;
        return { requiredVersion };
    }

    // Serializes base-class state, both input ports, the padding value, and the
    // input/output memory layouts. Keys must match ReadFromArchive below.
    template <typename ValueType, typename FunctionType>
    void BinaryFunctionNode<ValueType, FunctionType>::WriteToArchive(utilities::Archiver& archiver) const
    {
        model::CompilableNode::WriteToArchive(archiver);
        archiver[defaultInput1PortName] << _input1;
        archiver[defaultInput2PortName] << _input2;
        archiver["paddingValue"] << _paddingValue;
        archiver["inputLayout"] << _inputLayout;
        const auto currentOutputLayout = _output.GetMemoryLayout();
        archiver["outputLayout"] << currentOutputLayout;
    }

    // Deserializes the node's state in the same key order as WriteToArchive; the
    // output layout is read into a temporary and applied to the output port.
    template <typename ValueType, typename FunctionType>
    void BinaryFunctionNode<ValueType, FunctionType>::ReadFromArchive(utilities::Unarchiver& archiver)
    {
        model::CompilableNode::ReadFromArchive(archiver);
        archiver[defaultInput1PortName] >> _input1;
        archiver[defaultInput2PortName] >> _input2;
        archiver["paddingValue"] >> _paddingValue;
        archiver["inputLayout"] >> _inputLayout;
        model::PortMemoryLayout restoredOutputLayout;
        archiver["outputLayout"] >> restoredOutputLayout;
        _output.SetMemoryLayout(restoredOutputLayout);
    }
} // namespace nodes
} // namespace ell

#pragma endregion implementation
