﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace ZGSharp.Layers
{
    /// <summary>
    /// Reduction layer: collapses one or more dimensions of its input tensor, either by
    /// summation (<see cref="sum"/>) or by arithmetic mean (<see cref="mean"/>).
    /// NOTE(review): the flat-index arithmetic in <see cref="ComputeInputIndex"/> offsets each
    /// output element by i * reduce_count, which is only correct when the reduced dimensions
    /// are the trailing, contiguous dimensions of the input — confirm callers never reduce a
    /// leading or middle dimension on its own.
    /// </summary>
    public class Reduce : Tensor
    {
        // Indices of the input dimensions that are reduced away (sorted ascending).
        private int[] reduce_dims = null;
        // Sizes of the reduced dimensions, in reduce_dims order.
        List<int> reduce_shape = null;
        // Number of input elements folded into each output element (product of reduce_shape).
        private int reduce_count = 0;
        // Cumulative strides over the reduced dims, innermost first, e.g. [1, 6, 30] for reduce_shape [5, 6].
        List<int> reduce_shape_offset = null;
        // Cumulative strides over the full input shape, innermost first, e.g. [1, 6, 30, 120, 360] for [3, 4, 5, 6].
        List<int> input_shape_offset = null;

        /// <summary>
        /// Connects this layer to its input tensor and (re)allocates the output buffer when the
        /// batch size has changed since the previous forward pass.
        /// </summary>
        /// <param name="_input_tensor">Upstream tensor this reduction reads from.</param>
        /// <exception cref="ArgumentException">The input tensor's batch size is 0.</exception>
        public void PredInit(Tensor _input_tensor)
        {
            if (_input_tensor.batch_size == 0)
            {
                throw new ArgumentException("batch size不能为0");
            }
            // Batch size is taken from this tensor's own shape[0], not from _input_tensor.batch_size.
            // NOTE(review): this relies on Construct placing the batch dimension first — confirm.
            if (batch_size != shape[0])
            {
                // Shape changed since last run: force forward and backward buffers to be rebuilt.
                this.pred_init = false;
                this.backward_init = false;
            }
            if (!this.pred_init)
            {
                this.batch_size = shape[0];
                this.outputs = new double[this.batch_size * this.value_len];
                this.pred_init = true;
            }
            // NOTE(review): registers unconditionally, so repeated calls add this layer to the
            // input's output_tensor list more than once — confirm that is intended.
            this.input_tensor = _input_tensor;
            this.input_tensor.output_tensor.Add(this);
        }

        /// <summary>
        /// Shared setup for <see cref="sum"/> and <see cref="mean"/>: derives the output shape,
        /// the reduced-dimension bookkeeping and the stride tables, then wires the layer to its
        /// input. Extracted to remove the former copy/paste duplication between the two methods.
        /// </summary>
        /// <param name="tensor">Freshly created reduction layer being configured.</param>
        /// <param name="obj">Input tensor (already validated as non-null by the caller).</param>
        /// <param name="dims">Dimensions to reduce, or null to reduce every dimension.</param>
        /// <exception cref="ArgumentException">dims covers all dimensions, or contains an invalid index.</exception>
        private static void InitReduce(Reduce tensor, Tensor obj, int[] dims)
        {
            tensor.reduce_count = 1;
            tensor.reduce_shape = new List<int>();
            if (dims == null) // reduce every dimension down to a single scalar
            {
                tensor.Construct(new int[] { 1 });
                List<int> delShape = new List<int>();
                for (int i = 0; i < obj.dim; i++)
                {
                    tensor.reduce_shape.Add(obj.shape[i]);
                    tensor.reduce_count *= obj.shape[i];
                    delShape.Add(i);
                }
                tensor.reduce_dims = delShape.ToArray(); // the removed dims
            }
            else
            {
                if (dims.Length >= obj.dim)
                {
                    throw new ArgumentException("reduce的维度不能大于或等于原有维度");
                }
                List<int> inputShape = new List<int>(obj.shape); // dims that survive the reduction
                List<int> delShape = new List<int>(dims);
                delShape.Sort();
                tensor.reduce_dims = delShape.ToArray(); // the removed dims
                for (int i = 0; i < delShape.Count; i++)
                {
                    tensor.reduce_shape.Add(obj.shape[delShape[i]]);
                    // Remove from the largest index downwards so earlier removals
                    // do not shift the indices that are still to be removed.
                    int delDim = delShape[delShape.Count - 1 - i];
                    if (delDim < inputShape.Count)
                    {
                        tensor.reduce_count *= obj.shape[delShape[i]];
                        inputShape.RemoveAt(delDim);
                    }
                    else
                    {
                        throw new ArgumentException("reduce参数有错误");
                    }
                }
                tensor.Construct(inputShape.ToArray());
            }
            tensor.PredInit(obj);
            // Example used throughout: obj[3, 4, 5, 6] with reduce_dims [2, 3]
            // => shape [3, 4], reduce_shape [5, 6], reduce_count 30.
            // Strides over the reduced dims, innermost first: [1, 6, 30].
            tensor.reduce_shape_offset = new List<int>() { 1 };
            int current_off = 1;
            for (int i = tensor.reduce_shape.Count - 1; i >= 0; i--)
            {
                current_off *= tensor.reduce_shape[i];
                tensor.reduce_shape_offset.Add(current_off);
            }
            // Strides over the full input shape, innermost first: [1, 6, 30, 120, 360].
            tensor.input_shape_offset = new List<int>() { 1 };
            current_off = 1;
            for (int i = obj.dim - 1; i >= 0; i--)
            {
                current_off *= obj.shape[i];
                tensor.input_shape_offset.Add(current_off);
            }
        }

        /// <summary>
        /// Maps (output element i, reduction step j) to the flat index of the corresponding
        /// input element; j enumerates the reduce_count elements folded into output i.
        /// Must only be called after <see cref="InitReduce"/> has filled the stride tables.
        /// </summary>
        private int ComputeInputIndex(int i, int j)
        {
            int input_idx = 0;
            for (int k = 0; k < reduce_dims.Length; k++)
            {
                // Decode j's position along the k-th innermost reduced dimension...
                int pos = (j / reduce_shape_offset[k]) % reduce_shape_offset[k + 1];
                int this_dim = reduce_dims[reduce_dims.Length - 1 - k];
                // ...and re-encode it with that dimension's stride in the input tensor.
                input_idx += input_shape_offset[input_tensor.dim - this_dim - 1] * pos;
            }
            // Each output element owns a contiguous run of reduce_count input elements
            // (valid only while the reduced dims are the trailing ones — see class note).
            return input_idx + i * reduce_count;
        }

        /// <summary>
        /// Builds a sum-reduction over <paramref name="dims"/> of <paramref name="obj"/>
        /// (over all dimensions when dims is null) and runs the forward pass immediately,
        /// unless the graph is in build-only mode.
        /// </summary>
        /// <param name="obj">Input tensor; null is propagated as null.</param>
        /// <param name="dims">Dimensions to reduce, or null for all of them.</param>
        /// <returns>The new reduction tensor, or null when obj is null.</returns>
        internal static Tensor sum(Tensor obj, int[] dims = null)
        {
            if (obj == null)
            {
                return null;
            }
            Reduce tensor = new Reduce();
            tensor.kind = TensorKinds.LayerReduceSum;
            // Graph-construction mode: only record the connection, no computation.
            if (obj.kind == TensorKinds.TensorNormal || obj.build_only)
            {
                tensor.build_only = true;
                tensor.input_tensor = obj;
                obj.output_tensor.Add(tensor);
                return tensor;
            }
            tensor.build_only = false;
            InitReduce(tensor, obj, dims);
            // One parallel iteration per output element, e.g. 12 for an output shape of [3, 4].
            Parallel.For(0, tensor.batch_size * tensor.value_len, delegate (int i)
            {
                double sum_value = 0;
                for (int j = 0; j < tensor.reduce_count; j++)
                {
                    sum_value += obj.outputs[tensor.ComputeInputIndex(i, j)];
                }
                tensor.outputs[i] = sum_value;
            });
            return tensor;
        }

        /// <summary>
        /// Builds a mean-reduction over <paramref name="dims"/> of <paramref name="obj"/>
        /// (over all dimensions when dims is null) and runs the forward pass immediately,
        /// unless the graph is in build-only mode.
        /// Fix: accumulates the raw sum and divides once at the end, instead of dividing
        /// every term inside the loop (less rounding error, one division per output).
        /// </summary>
        /// <param name="obj">Input tensor; null is propagated as null.</param>
        /// <param name="dims">Dimensions to reduce, or null for all of them.</param>
        /// <returns>The new reduction tensor, or null when obj is null.</returns>
        public static Tensor mean(Tensor obj, int[] dims = null)
        {
            if (obj == null)
            {
                return null;
            }
            Reduce tensor = new Reduce();
            tensor.kind = TensorKinds.LayerReduceMean;
            // Graph-construction mode: only record the connection, no computation.
            if (obj.kind == TensorKinds.TensorNormal || obj.build_only)
            {
                tensor.build_only = true;
                tensor.input_tensor = obj;
                obj.output_tensor.Add(tensor);
                return tensor;
            }
            tensor.build_only = false;
            InitReduce(tensor, obj, dims);
            // One parallel iteration per output element, e.g. 12 for an output shape of [3, 4].
            Parallel.For(0, tensor.batch_size * tensor.value_len, delegate (int i)
            {
                double sum_value = 0;
                for (int j = 0; j < tensor.reduce_count; j++)
                {
                    sum_value += obj.outputs[tensor.ComputeInputIndex(i, j)];
                }
                tensor.outputs[i] = sum_value / tensor.reduce_count;
            });
            return tensor;
        }

        /// <summary>
        /// Back-propagates this layer's gradient to its input. Sum passes each gradient
        /// through unchanged; mean scales it by 1 / reduce_count. Gradients are accumulated
        /// (+=), and the chain only continues once every consumer has delivered its gradient.
        /// </summary>
        /// <param name="back">Downstream tensor that triggered this backward step.</param>
        /// <param name="lr">Learning rate, forwarded to the input tensor's Backward.</param>
        public override void Backward(Tensor back, double lr)
        {
            base.BackwordInit();
            switch (kind)
            {
                case TensorKinds.LayerReduceMean:
                    Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
                    {
                        for (int j = 0; j < reduce_count; j++)
                        {
                            // d(mean)/dx = 1/N for every contributing input element.
                            this.input_tensor.gradient[ComputeInputIndex(i, j)] += this.gradient[i] / reduce_count;
                        }
                    });
                    break;
                case TensorKinds.LayerReduceSum:
                    Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
                    {
                        for (int j = 0; j < reduce_count; j++)
                        {
                            // d(sum)/dx = 1 for every contributing input element.
                            this.input_tensor.gradient[ComputeInputIndex(i, j)] += this.gradient[i];
                        }
                    });
                    break;
            }
            this.output_tensor.Remove(back);
            if (this.output_tensor.Count == 0)
            {
                this.input_tensor.Backward(this, lr);
                // Reset the local gradient buffer for the next backward pass.
                Array.Clear(this.gradient, 0, this.gradient.Length);
            }
        }
    }
}
