#include "eigen/average_pooling.h"

#include <algorithm>
#include <cstdint>

#include "eigen/padding.h"
#include "log.h"
#include "tools/range.h"

namespace  ldl_eigen
{
// Configure the pooling layer for a (height x width) input image.
// Derived geometry is precomputed once: the zero-padded dimensions, the
// largest valid top-left offset of the pooling window, and the output size.
AveragePooling::AveragePooling(int64_t width, int64_t height, int64_t pooling_size, int64_t stride, int64_t padding)
{
    // Raw configuration.
    m_width = width;
    m_height = height;
    m_pooling_size = pooling_size;
    m_stride = stride;
    m_padding = padding;

    // Dimensions after zero-padding on all four sides.
    const int64_t padded_w = width + 2 * padding;
    const int64_t padded_h = height + 2 * padding;
    m_padding_width = padded_w;
    m_padding_height = padded_h;

    // Largest offset at which the pooling window still fits, and the
    // number of window positions per dimension (standard conv-output formula).
    m_row_sliding_max = padded_h - pooling_size;
    m_col_sliding_max = padded_w - pooling_size;
    m_output_height = m_row_sliding_max / stride + 1;
    m_output_width = m_col_sliding_max / stride + 1;
}

// Forward pass: slide a (pooling_size x pooling_size) window over each
// zero-padded input image and write the window mean to the flattened output.
void AveragePooling::forward()
{
    // resize only reallocates when the current size differs from the target.
    m_output.resize(m_ptr_input->rows(), m_output_height * m_output_width);

    const int64_t batch_count = m_ptr_input->rows();
    for (int64_t sample = 0; sample < batch_count; sample++)
    {
        // Reinterpret this sample's flat row as an (m_height x m_width)
        // image, then zero-pad it for the sliding window.
        auto image = m_ptr_input->block(sample, 0, 1, m_height * m_width).reshaped(m_height, m_width).transpose();
        auto padded = Padding::add_padding(image, m_padding);

        int64_t out_r = 0;
        for (int64_t r = 0; r <= m_row_sliding_max; r += m_stride, out_r++)
        {
            int64_t out_c = 0;
            for (int64_t c = 0; c <= m_col_sliding_max; c += m_stride, out_c++)
            {
                // Average over one pooling window; output stays flattened
                // row-major as (out_r, out_c) -> out_r * output_width + out_c.
                auto window = padded.block(r, c, m_pooling_size, m_pooling_size);
                m_output(sample, out_r * m_output_width + out_c) = window.mean();
            }
        }
    }
}

// Backward pass: propagate the loss gradient from the pooled output back to
// the (unpadded) input.
//
// Each input element covered by a pooling window receives
// 1/(pooling_size^2) of that window's output gradient; when windows overlap
// (stride < pooling_size) the contributions are summed. For a padded
// coordinate (row, col) the set of output cells whose window covers it is a
// contiguous, stride-spaced range per dimension; the Eigen::seq/seqN pairs
// below enumerate exactly that range (transposed-convolution indexing).
void AveragePooling::backward()
{
    const int64_t output_d = m_output_height * m_output_width;
    const int64_t row_max = m_padding_height - 1;
    const int64_t col_max = m_padding_width - 1;
    // Averaging factor shared by every window (hoisted out of the loops).
    const float pool_area = static_cast<float>(m_pooling_size * m_pooling_size);

    // One column per input pixel.
    // BUG FIX: this was previously sized m_height * m_height, which is wrong
    // for non-square inputs (the write index below strides by m_width).
    m_input_gradient.resize(m_ptr_input->rows(), m_height * m_width);

    // Only the unpadded region contributes to the input gradient; padding
    // rows/columns are skipped entirely.
    for (int64_t row = m_padding; row < m_padding_height - m_padding; row++)
    {
        // Start of the covering-window range for this row, snapped forward
        // onto the stride grid when it falls below the last valid offset.
        int64_t row_start = row_max - row;
        if (row_start < (m_padding_height - m_pooling_size))
        {
            row_start = ((m_padding_height - m_pooling_size) - row_start) % m_stride + (m_padding_height - m_pooling_size);
        }
        const int64_t row_end = std::min(row_max - row + m_padding_height - m_pooling_size, m_padding_height - 1);
        const auto gradient_row_indices_seq = Eigen::seq(row_start, row_end, m_stride);

        // First output row whose window still covers this input row.
        int64_t output_row_start = 0;
        if (row >= m_pooling_size)
        {
            output_row_start = (row - m_pooling_size) / m_stride + 1;
        }
        const auto output_gradient_row_indices_seq = Eigen::seqN(output_row_start, gradient_row_indices_seq.size(), 1);

        for (int64_t col = m_padding; col < m_padding_width - m_padding; col++)
        {
            // Same range computation along the column dimension.
            int64_t col_start = col_max - col;
            if (col_start < (m_padding_width - m_pooling_size))
            {
                col_start = ((m_padding_width - m_pooling_size) - col_start) % m_stride + (m_padding_width - m_pooling_size);
            }
            const int64_t col_end = std::min(col_max - col + m_padding_width - m_pooling_size, m_padding_width - 1);
            const auto gradient_col_indices_seq = Eigen::seq(col_start, col_end, m_stride);

            int64_t output_col_start = 0;
            if (col >= m_pooling_size)
            {
                output_col_start = (col - m_pooling_size) / m_stride + 1;
            }
            const auto output_gradient_col_indices_seq = Eigen::seqN(output_col_start, gradient_col_indices_seq.size(), 1);

            // Flat destination index in the unpadded input gradient.
            const int64_t gradient_col = (row - m_padding) * m_width + (col - m_padding);
            for (int64_t batch_index = 0; batch_index < m_ptr_input->rows(); batch_index++)
            {
                // View this sample's output gradient as an
                // (output_height x output_width) image.
                auto single_output_gradient = m_ptr_output_gradient->block(batch_index, 0, 1, output_d).reshaped(m_output_height, m_output_width).transpose();
                // Sum the gradients of every window covering (row, col) and
                // scale by the averaging factor in a single write (the old
                // code wrote, then divided in a second indexed assignment).
                m_input_gradient(batch_index, gradient_col) =
                    single_output_gradient(output_gradient_row_indices_seq, output_gradient_col_indices_seq).array().sum() / pool_area;
            }
        }
    }
    // NOTE: the old implementation also built an all-ones reversed kernel
    // (kernal_gradient / kernel_reverse via Range::range) that was never read
    // after construction — that dead code has been removed.
}
}


// [[1.         1.88888889 0.88888889 1.66666667 0.77777778]
//  [1.66666667 3.11111111 1.44444444 2.66666667 1.22222222]
//  [0.66666667 1.22222222 0.55555556 1.         0.44444444]
//  [1.         1.77777778 0.77777778 1.33333333 0.55555556]
//  [0.33333333 0.55555556 0.22222222 0.33333333 0.11111111]]