﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace ZGSharp.Layers
{
    /// <summary>
    /// 2-D max-pooling layer over NHWC tensors ({ batch, height, width, channels }).
    /// Forward pass records the argmax position of each pooling window so the
    /// backward pass can route gradients to the winning input element.
    /// </summary>
    public class Pool2D : Tensor
    {
        // For each output element, the flat offset (h * kernel_size[1] + w) of the
        // max element within its pooling window; consumed by Backward.
        private int[] pool_idx;

        // Pooling stride as { stride_h, stride_w }.
        public int[] step;

        // Pooling window size as { kernel_h, kernel_w }.
        public int[] kernel_size;

        /// <summary>
        /// (Re-)initializes the forward-pass output buffer and wires this layer to its input.
        /// Must be called after <c>shape</c>/<c>value_len</c> have been set.
        /// </summary>
        /// <param name="_input_tensor">Upstream tensor feeding this pooling layer.</param>
        /// <exception cref="ArgumentNullException">Thrown when <paramref name="_input_tensor"/> is null.</exception>
        /// <exception cref="ArgumentException">Thrown when the input tensor's batch size is 0.</exception>
        public void PredInit(Tensor _input_tensor)
        {
            if (_input_tensor == null)
            {
                throw new ArgumentNullException(nameof(_input_tensor));
            }
            if (_input_tensor.batch_size == 0)
            {
                throw new ArgumentException("batch size不能为0");
            }
            // A batch-size change invalidates both the forward and backward buffers.
            if (batch_size != shape[0])
            {
                this.pred_init = false;
                this.backward_init = false;
            }
            if (!pred_init)
            {
                this.batch_size = shape[0];
                this.outputs = new double[this.batch_size * this.value_len];
                this.pred_init = true;
            }
            this.input_tensor = _input_tensor;
            this.input_tensor.output_tensor.Add(this);
        }

        /// <summary>
        /// Builds a 2-D max-pooling layer over <paramref name="obj"/> and, unless in
        /// build-only mode, runs the forward pass immediately.
        /// Output spatial size is ceil(H / stride_h) x ceil(W / stride_w); window cells
        /// that fall past the input's bottom/right edge are ignored.
        /// </summary>
        /// <param name="obj">Input tensor, NHWC layout { batch, height, width, channels }.</param>
        /// <param name="_kernel_size">Pooling window { kernel_h, kernel_w }.</param>
        /// <param name="_step">Stride { stride_h, stride_w }; defaults to { 2, 2 } when null.</param>
        /// <returns>The pooling layer tensor, or null when <paramref name="obj"/> is null.</returns>
        public static Tensor MaxPool(Tensor obj, int[] _kernel_size, int[] _step = null)
        {
            if (obj == null)
            {
                return null;
            }
            // Guard here so a null/short kernel fails fast instead of inside the parallel loop.
            if (_kernel_size == null || _kernel_size.Length < 2)
            {
                throw new ArgumentException("kernel_size must contain { kernel_h, kernel_w }", nameof(_kernel_size));
            }
            Pool2D tensor = new Pool2D();
            tensor.kind = TensorKinds.LayerMaxPool2D;
            tensor.kernel_size = _kernel_size;
            tensor.step = _step ?? new int[] { 2, 2 };
            // Graph-construction-only mode: just wire the nodes, no computation.
            if (obj.kind == TensorKinds.TensorNormal || obj.build_only)
            {
                tensor.build_only = true;
                tensor.input_tensor = obj;
                obj.output_tensor.Add(tensor);
                return tensor;
            }
            tensor.build_only = false;
            int width = (obj.shape[2] + tensor.step[1] - 1) / tensor.step[1]; // ceiling division
            int height = (obj.shape[1] + tensor.step[0] - 1) / tensor.step[0];
            tensor.shape = new int[] { obj.shape[0], height, width, obj.shape[3] };
            tensor.value_len = width * height * tensor.shape[3];
            tensor.PredInit(obj);
            tensor.pool_idx = new int[tensor.batch_size * tensor.value_len];
            // Each i targets a distinct output element, so the parallel writes are disjoint.
            Parallel.For(0, tensor.batch_size * tensor.value_len, delegate (int i)
            {
                // Decompose flat output index i into (batch, h, w, c) for NHWC layout.
                int batch_idx = i / tensor.value_len;
                int value_idx = i % tensor.value_len;
                int height_idx = value_idx / (tensor.shape[2] * tensor.shape[3]);
                int row_idx = value_idx % (tensor.shape[2] * tensor.shape[3]);
                int width_idx = row_idx / tensor.shape[3];
                int channel_idx = row_idx % tensor.shape[3];
                // Top-left corner of the pooling window in the input.
                int ipt_h_idx = height_idx * tensor.step[0];
                int ipt_w_idx = width_idx * tensor.step[1];
                tensor.pool_idx[i] = 0; // default argmax: window offset (0, 0)
                int ipt_value_idx = batch_idx * obj.value_len + ipt_h_idx * obj.shape[2] * obj.shape[3] + ipt_w_idx * obj.shape[3] + channel_idx;
                double max_value = obj.outputs[ipt_value_idx];
                for (int h_idx = 0; h_idx < _kernel_size[0]; h_idx++)
                {
                    for (int w_idx = 0; w_idx < _kernel_size[1]; w_idx++)
                    {
                        int check_h_idx = ipt_h_idx + h_idx;
                        int check_w_idx = ipt_w_idx + w_idx;
                        // Skip window cells clipped off by the input's bottom/right edge.
                        if (check_h_idx >= obj.shape[1] || check_w_idx >= obj.shape[2])
                        {
                            continue;
                        }
                        int check_value_idx = ipt_value_idx + h_idx * obj.shape[2] * obj.shape[3] + w_idx * obj.shape[3];
                        if (max_value < obj.outputs[check_value_idx])
                        {
                            max_value = obj.outputs[check_value_idx];
                            // Remember which window cell won, for gradient routing in Backward.
                            tensor.pool_idx[i] = h_idx * _kernel_size[1] + w_idx;
                        }
                    }
                }
                tensor.outputs[i] = max_value;
            });
            return tensor;
        }

        /// <summary>
        /// Back-propagates through the max-pool: each output gradient is added to the
        /// input element that produced the max in the forward pass. Once every downstream
        /// consumer has reported back, recurses into the input tensor and clears this
        /// layer's gradient buffer.
        /// </summary>
        /// <param name="back">The downstream tensor whose backward pass invoked this one.</param>
        /// <param name="lr">Learning rate, forwarded unchanged to the upstream Backward call.</param>
        public override void Backward(Tensor back, double lr)
        {
            base.BackwordInit();
            switch (kind)
            {
                case TensorKinds.LayerMaxPool2D:
                    // Parallelize over the batch only: with overlapping windows (step < kernel)
                    // several outputs within one sample can route gradient to the SAME input
                    // element, so the per-sample "+=" accumulation must stay sequential to
                    // avoid lost updates (the original per-element Parallel.For raced here).
                    Parallel.For(0, this.batch_size, delegate (int batch_idx)
                    {
                        for (int value_idx = 0; value_idx < value_len; value_idx++)
                        {
                            int i = batch_idx * value_len + value_idx;
                            // Decompose into (h, w, c) for NHWC layout.
                            int height_idx = value_idx / (shape[2] * shape[3]);
                            int row_idx = value_idx % (shape[2] * shape[3]);
                            int width_idx = row_idx / shape[3];
                            int channel_idx = row_idx % shape[3];
                            // Recover the argmax position inside the forward pooling window.
                            int ipt_h_idx = height_idx * step[0] + pool_idx[i] / kernel_size[1];
                            int ipt_w_idx = width_idx * step[1] + pool_idx[i] % kernel_size[1];
                            // BUGFIX: index the input with its NHWC strides (W*C for height, C for
                            // width), matching the forward pass; the old code used
                            // shape[1]*shape[2] and shape[2] (H*W and W), sending gradients to
                            // the wrong elements whenever W*C != H*W strides.
                            int input_value_idx = batch_idx * input_tensor.value_len
                                + ipt_h_idx * input_tensor.shape[2] * input_tensor.shape[3]
                                + ipt_w_idx * input_tensor.shape[3]
                                + channel_idx;
                            input_tensor.gradient[input_value_idx] += gradient[i];
                        }
                    });
                    break;
            }
            this.output_tensor.Remove(back);
            // Only propagate upstream once every downstream consumer has contributed.
            if (this.output_tensor.Count == 0)
            {
                this.input_tensor.Backward(this, lr);
                Array.Clear(this.gradient, 0, this.gradient.Length);
            }
        }
    }
}
