// The MIT License (MIT)
// Copyright (c) <2023> <caofx copyright holders>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

#include <iostream>
#include <string>
#include <vector>
#include <memory>
#include <map>
#include <algorithm>

#include <stdint.h>
#include <math.h>
#include <stdio.h>

using namespace std;

// A dense n-dimensional tensor stored as a flat float buffer in
// row-major order. `dims` is the shape; `data` holds product(dims)
// elements, zero-initialized on construction.
struct Tensor {
    // Allocates storage for the given shape. The element count is the
    // product of all dimensions, accumulated in size_t so large shapes
    // cannot overflow a 32-bit counter. An empty `dims` yields one element.
    Tensor(const std::vector<uint32_t>& dims) : dims(dims) {
        size_t count = 1;
        for (auto dim : dims) {
            count *= dim;
        }

        data.resize(count);
    }

    // Replaces the contents of `data` with `len` floats copied from `d`.
    // NOTE(review): `len` is not validated against the shape; callers are
    // expected to pass exactly product(dims) elements.
    void setData(float* d, uint32_t len) {
        data.assign(d, d + len);
    }

    std::vector<float> data;     // flat element storage, row-major
    std::vector<uint32_t> dims;  // shape, outermost dimension first
};

// Streams a Tensor as "Tensor data: [v0, v1, ...]", using the fixed
// six-decimal formatting of std::to_string for each element.
ostream& operator<<(ostream& os, const Tensor& t)
{
    os << "Tensor data" << ": [";
    const char* sep = "";
    for (const float v : t.data) {
        os << sep << std::to_string(v);
        sep = ", ";
    }
    os << "]";
    return os;
}

bool dot(const std::shared_ptr<Tensor>& l, const std::shared_ptr<Tensor>& r, std::shared_ptr<Tensor>& out) {
    // current only support left 2dims, right 1 dim, not memory safe!
    if (l->dims.size() != 2 || r->dims.size() != 1  || out->dims.size() != 1) {
        // printf("l: %d r: %d out: %d\n", l->dims.size(), r->dims.size(), out->dims.size());
        cout << "dims size error" << endl;
        return false;
    }

    size_t lRow = l->dims[0]; // exampe: 100
    size_t lCol = l->dims[1]; // exampe: 784
    size_t rCol = r->dims[0]; // exampe: 784
    size_t outRow = out->dims[0]; // exampe: 100

    if (lRow != outRow || lCol != rCol) {
        cout << "dim error" << endl;
        // printf("%d %d %d %d \n", lRow, lCol, rCol, outRow);
        return false;
    }

    for (size_t i = 0; i < lRow; ++i) {
        float tmp = 0;
        for (size_t j = 0; j < lCol; ++j) {
            tmp += l->data[i*lCol + j] * r->data[j];
        }

        out->data[i] = tmp;
    }

    return true;
}

bool sigmoid(const std::shared_ptr<Tensor>& in, std::shared_ptr<Tensor>& out) {
    // current only support  1 dim, not memory safe!
    if (in->dims.size() != 1 || out->dims.size() != 1) {
        cout << "dims size error" << endl;
        return false;
    }

    size_t inRow = in->dims[0]; // exampe: 100
    size_t outRow = out->dims[0]; // exampe: 100

    if (inRow != outRow) {
        cout << "dim error" << endl;
        return false;
    }

    for (size_t i = 0; i < inRow; ++i) {
        // out = 1 / (1 + numpy.exp(-x))  #scipy.special.expit(x)
        out->data[i] = ( 1.f / ( 1.f + exp(-(in->data[i]))));
    }

    return true;
}

class Graph;

// Abstract interface for one computation step in a Graph. A layer
// resolves its input/output tensors by name from the owning Graph
// when forward() runs.
class LayerBase {
public:
    // Virtual destructor so derived layers are destroyed correctly when
    // deleted through a LayerBase pointer (the layers are stored as
    // shared_ptr<LayerBase> elsewhere in this file).
    virtual ~LayerBase() = default;

    // Executes the layer against the tensors owned by ctx; returns
    // false on shape/size errors.
    virtual bool forward(Graph* ctx) = 0;
    virtual std::string& getInputTensorName() = 0;
    virtual std::string& getOutputTensorName() = 0;
};

// Fully-connected layer: output = weight * input, where the weight
// tensor is (outputNodes x inputNodes). All three tensors are looked up
// by name in the owning Graph at forward() time. (An unused m_debug
// flag was removed — nothing in this file ever read it.)
class DenseLayer : public LayerBase {
public:
    DenseLayer(const string& inputTensorName, const string& weightName, const string& outputTensorName,
               uint32_t inputNodes, uint32_t outputNodes) :
        m_inputTensorName(inputTensorName), m_weightName(weightName), m_outputTensorName(outputTensorName),
        m_inputNodes(inputNodes), m_outputNodes(outputNodes) {}

    // Defined after Graph; resolves the named tensors and calls dot().
    bool forward(Graph* ctx) override;

    string& getInputTensorName() override {
        return m_inputTensorName;
    }

    string& getOutputTensorName() override {
        return m_outputTensorName;
    }

private:
    string m_inputTensorName;
    string m_weightName;
    string m_outputTensorName;

    // Declared layer sizes; currently informational only — shape
    // consistency is enforced inside dot().
    uint32_t m_inputNodes;
    uint32_t m_outputNodes;
};

// Element-wise sigmoid activation layer. Input and output tensors are
// looked up by name in the owning Graph at forward() time. (An unused
// m_debug flag was removed — nothing in this file ever read it.)
class SigmoidLayer : public LayerBase {
public:
    SigmoidLayer(const string& inputTensorName, const string& outputTensorName) :
        m_inputTensorName(inputTensorName), m_outputTensorName(outputTensorName) {}

    // Defined after Graph; resolves the named tensors and calls sigmoid().
    bool forward(Graph* ctx) override;

    string& getInputTensorName() override {
        return m_inputTensorName;
    }

    string& getOutputTensorName() override {
        return m_outputTensorName;
    }

private:
    string m_inputTensorName;
    string m_outputTensorName;
};

// A fixed three-layer MLP (dense -> sigmoid -> dense -> sigmoid) whose
// tensors are owned in a name-keyed map and whose layers run in
// insertion order. Weights are baked in at build time via the
// project-generated headers included inside the constructor.
class Graph {
public:
    Graph(uint32_t inputnodes, uint32_t hiddennodes, uint32_t outputnodes) {
        // Topology:
        // inputTensor ->
        //                (denseLayer_1) -> tensor_1 -> (SigmoidLayer_2) ->  tensor_2 -> (denseLayer_2) ->  tensor_3 -> (SigmoidLayer_3) -> outputTensor
        // weight_0    ->                                                    weight_1 ->
        string inputTensorName("inputTensor");
        vector<uint32_t> inputTensorDims {inputnodes};
        auto inputTensor = make_shared<Tensor>(inputTensorDims);
        m_tensors.insert({inputTensorName, inputTensor});

        // Hidden-layer weight: (hiddennodes x inputnodes). The header
        // declares the raw byte array at this exact scope, so the
        // #include must stay inside the constructor body.
        string weight_0Name("weight_0");
        vector<uint32_t> weight_0Dims {hiddennodes, inputnodes};
        auto weight_0 = make_shared<Tensor>(weight_0Dims);
        #include "inputLayer_weight.h"
        weight_0->setData((float*)inputLayer_weight_bin, inputLayer_weight_bin_len / sizeof(float));// setData
        m_tensors.insert({weight_0Name, weight_0});

        string tensor_1Name("tensor_1");
        vector<uint32_t> tensor_1Dims {hiddennodes};
        auto tensor_1 = make_shared<Tensor>(tensor_1Dims);
        m_tensors.insert({tensor_1Name, tensor_1});

        shared_ptr<LayerBase> denseLayer_1 = make_shared<DenseLayer>(inputTensorName, weight_0Name, tensor_1Name, inputnodes, hiddennodes);
        m_layers.push_back(denseLayer_1);

        //------------------------------------------
        string tensor_2Name("tensor_2");
        vector<uint32_t> tensor_2Dims {hiddennodes};
        auto tensor_2 = make_shared<Tensor>(tensor_2Dims);
        m_tensors.insert({tensor_2Name, tensor_2});

        shared_ptr<LayerBase> SigmoidLayer_2 = make_shared<SigmoidLayer>(tensor_1Name, tensor_2Name);
        m_layers.push_back(SigmoidLayer_2);

        //------------------------------------------
        // Output-layer weight: (outputnodes x hiddennodes).
        string weight_1Name("weight_1");
        vector<uint32_t> weight_1Dims {outputnodes, hiddennodes};
        auto weight_1 = make_shared<Tensor>(weight_1Dims);
        #include "hiddenLayer_weight.h"
        weight_1->setData((float*)hiddenLayer_weight_bin, hiddenLayer_weight_bin_len / sizeof(float));// setData
        m_tensors.insert({weight_1Name, weight_1});

        string tensor_3Name("tensor_3");
        vector<uint32_t> tensor_3Dims {outputnodes};
        auto tensor_3 = make_shared<Tensor>(tensor_3Dims);
        m_tensors.insert({tensor_3Name, tensor_3});

        shared_ptr<LayerBase> denseLayer_2 = make_shared<DenseLayer>(tensor_2Name, weight_1Name, tensor_3Name,  hiddennodes, outputnodes);
        m_layers.push_back(denseLayer_2);

        //------------------------------------------
        string outputTensorName("outputTensor");
        vector<uint32_t> outputTensorDims {outputnodes};
        auto outputTensor = make_shared<Tensor>(outputTensorDims);
        m_tensors.insert({outputTensorName, outputTensor});

        shared_ptr<LayerBase> SigmoidLayer_3 = make_shared<SigmoidLayer>(tensor_3Name, outputTensorName);
        m_layers.push_back(SigmoidLayer_3);

        m_inputTensorName = inputTensorName;
        m_outputTensorName = outputTensorName;
    }

    // The caller fills this tensor (via setData) before inference().
    shared_ptr<Tensor>& getInputTensor() {
        return m_tensors[m_inputTensorName];
    }

    // Holds the network's activations after a successful inference().
    shared_ptr<Tensor>& getOutputTensor() {
        return m_tensors[m_outputTensorName];
    }

    // Runs every layer in order; stops and returns false on the first
    // layer that fails (the layer logs the specific reason).
    bool inference() {
        for (auto& layer: m_layers) {
            bool success = layer->forward(this);
            if (!success) {
                cout << "Error: layer forward failed!" << endl;
                return false;
            }
        }

        return true;
    }

private:
    map<string, shared_ptr<Tensor>> m_tensors;
    vector<shared_ptr<LayerBase>> m_layers; // sorted layers

    string m_inputTensorName;
    string m_outputTensorName;

    // Layers read m_tensors directly in their forward() implementations.
    friend class DenseLayer;
    friend class SigmoidLayer;
};

// Resolves the named tensors from the graph context and delegates the
// matrix-vector product to dot(); the weight tensor is the left operand.
bool DenseLayer::forward(Graph* ctx) {
    cout << m_inputTensorName << " | " << m_weightName << " -> (DenseLayer) -> " << m_outputTensorName << endl;
    auto& weight = ctx->m_tensors[m_weightName];
    auto& input = ctx->m_tensors[m_inputTensorName];
    auto& result = ctx->m_tensors[m_outputTensorName];
    return dot(weight, input, result);
}

// Applies the element-wise logistic function to the named input tensor,
// writing the result into the named output tensor.
bool SigmoidLayer::forward(Graph* ctx) {
    cout << m_inputTensorName << " -> (SigmoidLayer) -> " << m_outputTensorName << endl;
    // Bind by reference: copying the shared_ptrs would only churn the
    // refcount (and DenseLayer::forward already binds by reference).
    auto& inputTensor = ctx->m_tensors[m_inputTensorName];
    auto& outputTensor = ctx->m_tensors[m_outputTensorName];

    return sigmoid(inputTensor, outputTensor);
}

// Scales one raw image (assumed 8-bit grayscale pixels — the lambda
// maps 0..255 into [0.01, 1.0)) into the network input, runs inference,
// and prints the output activations. The output is only printed when
// every layer succeeded; inference() itself logs the failure reason.
template <typename Array>
static void classifySample(Graph& graph, shared_ptr<Tensor>& inputTensor,
                           shared_ptr<Tensor>& outputTensor, const Array& pixels) {
    vector<float> digit;
    std::transform(std::begin(pixels), std::end(pixels), std::back_inserter(digit),
               [](uint8_t c) -> float { return c / 255.0 * 0.99 + 0.01; });
    inputTensor->setData(digit.data(), digit.size());// setData

    if (graph.inference()) {
        cout << *outputTensor << endl;
    }
}

int main() {
    // 784 input pixels (28x28), 100 hidden nodes, 10 digit classes.
    Graph graph(784, 100, 10);
    auto& inputTensor = graph.getInputTensor();
    auto& outputTensor = graph.getOutputTensor();

    cout << "inputTensor size: " << inputTensor->data.size() << endl;
    cout << "outputTensor size: " << outputTensor->data.size() << endl;
    cout << "---------------------------------------------------------------" << endl;

    // Declares the sample arrays (arr_2, arr_7) at function scope.
    #include "test_input_data.h"
    classifySample(graph, inputTensor, outputTensor, arr_2);
    cout << "---------------------------------------------------------------" << endl;
    classifySample(graph, inputTensor, outputTensor, arr_7);
    cout << "---------------------------------------------------------------" << endl;

    return 0;
}
