#include "eigen/conv.h"
#include "eigen/normal.h"
#include "tools/range.h"
#include "log.h"
#include "eigen/padding.h"

namespace  ldl_eigen
{
// Conv: single-channel 2D convolution layer.
// img_width / img_height : spatial size of the un-padded input image.
// kernel_size            : side length of the square kernel, initialised
//                          from N(0, 0.01) via Normal::matrix.
// stride / padding       : sliding-window step and zero-padding per border.
Conv::Conv(int64_t img_width, int64_t img_height, int64_t kernel_size, int64_t stride, int64_t padding)
{
    m_img_width = img_width;
    m_img_height = img_height;
    m_stride = stride;
    m_padding = padding;
    m_bias = 0;

    // Weights start from a small normal distribution; the gradient buffer
    // only needs the right shape — backward() fills it.
    m_kernel = Normal::matrix(kernel_size, kernel_size, 0.0f, 0.01);
    m_kernel_gradient.resize(kernel_size, kernel_size);

    // Derived geometry (standard convolution output-size formula).
    m_padding_img_width = img_width + 2 * padding;
    m_padding_img_height = img_height + 2 * padding;
    m_row_sliding_max = m_padding_img_height - kernel_size + 1;
    m_col_sliding_max = m_padding_img_width - kernel_size + 1;
    m_output_height = (m_row_sliding_max - 1) / stride + 1;
    m_output_width = (m_col_sliding_max - 1) / stride + 1;
}

// Forward pass: each row of *m_ptr_input is one sample, a flattened
// img_height x img_width image. Zero-pad it, slide the kernel across it
// with the configured stride, and write the flattened feature map into
// the matching row of m_output; finally add the scalar bias everywhere.
void Conv::forward()
{
    // Eigen's resize only reallocates when the requested shape differs.
    m_output.resize(m_ptr_input->rows(), m_output_height * m_output_width);

    const int64_t batch_size = m_ptr_input->rows();
    for (int64_t sample = 0; sample < batch_size; ++sample)
    {
        // Row -> 2D image. reshaped() is column-major by default, so the
        // transpose recovers the row-major image layout of the flat row.
        auto image = m_ptr_input->block(sample, 0, 1, m_img_height * m_img_width)
                         .reshaped(m_img_height, m_img_width)
                         .transpose();
        auto padded = Padding::add_padding(image, m_padding);

        int64_t out_row = 0;
        for (int64_t r = 0; r < m_row_sliding_max; r += m_stride, ++out_row)
        {
            int64_t out_col = 0;
            for (int64_t c = 0; c < m_col_sliding_max; c += m_stride, ++out_col)
            {
                auto window = padded.block(r, c, m_kernel.rows(), m_kernel.cols());
                // Row-major flattening of the (out_row, out_col) position.
                m_output(sample, out_row * m_output_width + out_col) =
                    (window.array() * m_kernel.array()).sum();
            }
        }
    }

    m_output.array() += m_bias;
}

// Stand-alone convolution helper (no padding): slides `kernel` over
// `input` with the given `stride`, writes each dot product into `output`
// viewed as output_height x output_width (Eigen's default column-major
// reshape), then adds `bias` to every element of `output`.
// `output` must already hold output_height * output_width coefficients.
void Conv::conv(const Eigen::Ref<Eigen::MatrixXf> &input, const Eigen::Ref<Eigen::MatrixXf> &kernel, float bias, int64_t stride, Eigen::Ref<Eigen::MatrixXf> output)
{
    const auto row_sliding_max = input.rows() - kernel.rows() + 1;
    const auto col_sliding_max = input.cols() - kernel.cols() + 1;
    const auto output_height = (row_sliding_max - 1) / stride + 1;
    const auto output_width = (col_sliding_max - 1) / stride + 1;

    // Build the writable reshaped view once. The original constructed this
    // expression per element — and also had a discarded, no-op
    // `output.reshaped(...)` statement before the loop.
    auto out = output.reshaped(output_height, output_width);

    int64_t output_row_index = 0;
    for (int64_t row = 0; row < row_sliding_max; row += stride)
    {
        int64_t output_col_index = 0;
        for (int64_t col = 0; col < col_sliding_max; col += stride)
        {
            auto patch = input.block(row, col, kernel.rows(), kernel.cols());
            out(output_row_index, output_col_index) = (patch.array() * kernel.array()).sum();
            output_col_index++;
        }
        output_row_index++;
    }

    output.array() += bias;
}

void Conv::backward()
{
    // 计算损失对卷积核的偏导
    for(int64_t batch_index = 0;batch_index < m_ptr_input->rows();batch_index++)
    {
        auto single_input = m_ptr_input->block(batch_index, 0, 1, m_img_height * m_img_width).reshaped(m_img_height, m_img_width).transpose();
        auto single_input_padding = Padding::add_padding(single_input, m_padding);
        auto single_output_gradient = m_ptr_output_gradient->block(batch_index, 0, 1, m_output_height * m_output_width).reshaped(m_output_height, m_output_width).transpose();
        // LogInfo() << "single_input: \n" << single_input;
        // LogInfo() << "single_output_gradient: \n" << single_output_gradient;
        for(int64_t row = 0; row < m_kernel.rows(); row++)
        {
            auto row_indices = Range::range<int64_t>(row, m_row_sliding_max + row, m_stride);
            for(int64_t col = 0; col < m_kernel.cols(); col++)
            {
                auto col_indices = Range::range<int64_t>(col, m_col_sliding_max + col, m_stride);
                m_kernel_gradient(row, col) = ((single_input_padding)(row_indices, col_indices).array() * single_output_gradient.array()).sum();
            }
        }
    }


    // 计算损失对偏置的偏导数
    m_bias_gradient = (*m_ptr_output_gradient).array().sum();

    // 计算损失对输入的偏导数
    {
        // 1. 先创建导数的卷积核
        Eigen::MatrixXf kernal_gradient = Eigen::MatrixXf::Zero(m_kernel.rows()+2*(m_padding_img_height-m_kernel.rows()), m_kernel.cols()+2*(m_padding_img_width-m_kernel.cols()));

        // 2. 将卷积核倒序
        Eigen::MatrixXf kernel_reverse = m_kernel.colwise().reverse().rowwise().reverse();
        // LogInfo() << "kernel_reverse: " << kernel_reverse;

        auto row_indices = Range::range<int64_t>(m_padding_img_height-m_kernel.rows(), m_padding_img_height, 1);
        // LogInfo() << "row_indices: "  << row_indices;
        auto col_indices = Range::range<int64_t>(m_padding_img_width-m_kernel.cols(), m_padding_img_width, 1);
        // LogInfo() << "col_indices: "  << col_indices;

        kernal_gradient(row_indices, col_indices) = kernel_reverse;
        // LogInfo() << "kernal_gradient: "  << kernal_gradient;

        // 计算损失对输入的偏导数
        m_input_gradient.resize(m_ptr_input->rows(), m_img_height*m_img_height);

        auto row_max = m_padding_img_height - 1;
        auto col_max = m_padding_img_width - 1;
        // auto input_d = m_img_height * m_img_width;
        auto output_d = m_output_height * m_output_width;
        for (int64_t row = m_padding; row < m_padding_img_height-m_padding; row++)
        {
            // auto gradient_row_indices = Range::range(row_max - row, row_max - row + m_img_height - m_kernel.rows() + 1, m_stride);
            auto row_start = row_max - row;
            if(row_start < (m_padding_img_height-m_kernel.rows()))
            {
                row_start = ((m_padding_img_height-m_kernel.rows())- row_start) % m_stride + (m_padding_img_height-m_kernel.rows());
            }
            auto row_end = std::min(row_max - row + m_padding_img_height - m_kernel.rows(), m_padding_img_height - 1);
            auto gradient_row_indices_seq = Eigen::seq(row_start, row_end, m_stride);

            auto output_row_start = 0;
            if(row >= m_kernel.rows())
            {
                output_row_start = (row - m_kernel.rows()) / m_stride + 1;
            }

            auto output_gradient_row_indices_seq = Eigen::seqN(output_row_start, gradient_row_indices_seq.size(), 1);
            // LogInfo() << "gradient_row_indices: " << gradient_row_indices;
            // LogInfo() << "gradient_row_indices_seq: " << gradient_row_indices_seq;
            for (int64_t col = m_padding; col < m_padding_img_width-m_padding; col++)
            {
                // auto gradient_col_indices = Range::range(col_max - col, col_max - col + m_padding_img_width - m_kernel.cols() + 1, m_stride);
                auto col_start = col_max - col;
                if(col_start < (m_padding_img_width-m_kernel.cols()))
                {
                    col_start = ((m_padding_img_width-m_kernel.cols()) - col_start) % m_stride + (m_padding_img_width-m_kernel.cols());
                }
                auto col_end = std::min(col_max - col + m_padding_img_width - m_kernel.cols(), m_padding_img_width - 1);
                auto gradient_col_indices_seq = Eigen::seq(col_start, col_end, m_stride);

                auto output_col_start = 0;
                if(col >= m_kernel.cols())
                {
                    output_col_start = (col - m_kernel.cols()) / m_stride + 1;
                }
                auto output_gradient_col_indices_seq = Eigen::seqN(output_col_start, gradient_col_indices_seq.size(), 1);
                auto input_gradient_index = (row) * m_padding_img_width + (col);
                for (int64_t batch_index = 0; batch_index < m_ptr_input->rows(); batch_index++)
                {
                    // auto single_input = m_ptr_input->block(batch_index, 0, 1, input_d).reshaped(m_img_height, m_img_width).transpose();
                    auto single_output_gradient = m_ptr_output_gradient->block(batch_index, 0, 1, output_d).reshaped(m_output_height, m_output_width).transpose();
                    // LogInfo() << "(row, col): (" << row << ", " << col << ")"; 
                    // LogInfo() << "output_row_start: " << output_row_start;
                    // LogInfo() << "output_col_start: " << output_col_start;
                    // LogInfo() << "kernal_gradient(gradient_row_indices_seq, gradient_col_indices_seq): \n" << kernal_gradient(gradient_row_indices_seq, gradient_col_indices_seq);
                    // LogInfo() << "single_output_gradient(output_gradient_row_indices_seq, output_gradient_col_indices_seq): \n" << single_output_gradient(output_gradient_row_indices_seq, output_gradient_col_indices_seq) << "\n";
                    m_input_gradient(batch_index, (row-m_padding) * m_img_width + (col-m_padding)) = (kernal_gradient(gradient_row_indices_seq, gradient_col_indices_seq).array() * single_output_gradient(output_gradient_row_indices_seq, output_gradient_col_indices_seq).array()).sum();
                }
            }
        }
        // m_input_gradient(0) = input_gradient.row(0).reshape(m_padding_img_height, m_padding_img_width).(Eigen::seq(m_padding, m_img_height+m_padding-1, 1), Eigen::seq(m_padding, m_img_width+m_padding-1, 1));
    }
    
}

// SGD step: move the parameters against their gradients.
// NOTE(review): the learning rate is hard-coded here; consider making it a
// layer parameter so it matches the rest of the training configuration.
void Conv::update()
{
    // float to match the float parameters — the original used `double`,
    // forcing an implicit narrowing conversion in the scalar products.
    constexpr float lr = 0.01f;
    m_kernel -= lr * m_kernel_gradient;
    m_bias -= lr * m_bias_gradient;
}

// Replace the layer's kernel with an explicit weight matrix
// (e.g. for testing or loading pre-trained weights). The kernel is copied.
void Conv::set_kernel(const Eigen::MatrixXf &kernel)
{
    m_kernel = kernel;
}

// Mutable access to d(loss)/d(kernel) computed by the last backward() call.
// Returns a reference to the internal buffer — valid for the layer's lifetime.
Eigen::MatrixXf& Conv::kernel_gradient()
{
    return m_kernel_gradient;
}

// Set the scalar bias added to every output element in forward().
void Conv::set_bias(float bias)
{
    m_bias = bias;
}

// d(loss)/d(bias) computed by the last backward() call
// (sum of the output gradient over all batch samples and positions).
float Conv::bias_gradient()
{
    return m_bias_gradient;
}
}