#include "dl_layer_model.hpp"
#include "dl_layer_base.hpp"
#include "dl_layer_reshape.hpp"
#include "dl_layer_conv2d.hpp"
#include "dl_variable.hpp"
#include "dl_layer_max_pool2d.hpp"
#include "dl_layer_flatten.hpp"
#include "dl_layer_fullyconnected.hpp"
#include "mnist_coefficient.hpp"
#include <stdint.h>


using namespace dl::layer;
using namespace dl;
using namespace mnist_coefficient;

class MNIST : public Model<int16_t> // Derive the Model class in "dl_layer_model.hpp"
{
private:
    // Layers declared in forward order; the constructor's member initializer
    // list must keep this exact order (C++ initializes members in declaration
    // order regardless of the list order).
    Reshape<int16_t> l1;   // reshape raw input into a 28x28x1 image
    Conv2D<int16_t> l2;
    Conv2D<int16_t> l3;
    MaxPool2D<int16_t> l4; // 2x2 max pool, stride 2
    Flatten<int16_t> l5;
    Reshape<int16_t> l6;   // to 1x1x9216 so the GEMM head can run as a 1x1 conv

public:
    // Kept public on purpose: application code reads the model result via
    // this->l7.get_output() after call().
    Conv2D<int16_t> l7;    // fully-connected head expressed as Conv2D, no activation

    /**
     * @brief Initialize layers in constructor function.
     *
     * The first integer argument of each Conv2D is the output quantization
     * exponent; filters, biases and activation tables come from
     * "mnist_coefficient.hpp". The trailing string is the layer's
     * debug/profiling name and matches the member it names.
     */
    MNIST() : l1(Reshape<int16_t>({28, 28, 1})),
              l2(Conv2D<int16_t>(-14, get_conv__19_filter(), get_conv__19_bias(), get_conv__19_activation(), PADDING_VALID, {}, 1, 1, "l2")),
              l3(Conv2D<int16_t>(-13, get_conv__21_filter(), get_conv__21_bias(), get_conv__21_activation(), PADDING_VALID, {}, 1, 1, "l3")),
              l4(MaxPool2D<int16_t>({2, 2}, PADDING_VALID, {}, 2, 2, "l4")),
              l5(Flatten<int16_t>("l5")),
              l6(Reshape<int16_t>({1, 1, 9216}, "l6")),
              l7(Conv2D<int16_t>(-9, get_fused_gemm_0_filter(), get_fused_gemm_0_bias(), nullptr, PADDING_VALID, {}, 1, 1, "l7")) {}

    /**
     * @brief Call each layers' build(...) function in sequence.
     *
     * Each layer is built from the previous layer's output tensor so output
     * shapes and memory plans are derived before inference. The
     * print_shape() calls are debug aids that log every intermediate shape.
     *
     * @param input input tensor (raw MNIST image data)
     */
    void build(Tensor<int16_t> &input)
    {
        this->l1.build(input);
        this->l1.get_output().print_shape();

        this->l2.build(this->l1.get_output());
        this->l2.get_output().print_shape();

        this->l3.build(this->l2.get_output());
        this->l3.get_output().print_shape();

        this->l4.build(this->l3.get_output());
        this->l4.get_output().print_shape();

        this->l5.build(this->l4.get_output());
        this->l5.get_output().print_shape();

        this->l6.build(this->l5.get_output());
        this->l6.get_output().print_shape();

        this->l7.build(this->l6.get_output());
        this->l7.get_output().print_shape();
    }

    /**
     * @brief Call each layers' call(...) function in sequence.
     *
     * After each layer consumes its input, that input's element buffer is
     * freed immediately to keep peak memory low on the device. The final
     * result stays in l7.get_output() for the caller to read.
     *
     * @param input input tensor; its elements are freed after layer 1 runs
     */
    void call(Tensor<int16_t> &input)
    {
        this->l1.call(input);
        input.free_element();

        this->l2.call(this->l1.get_output());
        this->l1.get_output().free_element();

        this->l3.call(this->l2.get_output());
        this->l2.get_output().free_element();

        this->l4.call(this->l3.get_output());
        this->l3.get_output().free_element();

        this->l5.call(this->l4.get_output());
        this->l4.get_output().free_element();

        this->l6.call(this->l5.get_output());
        this->l5.get_output().free_element();

        this->l7.call(this->l6.get_output());
        this->l6.get_output().free_element();
    }
};

