using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;

namespace ZGSharp.Layers
{
	/// <summary>
	/// 2D convolution layer. Input/output tensors are laid out as
	/// (batch, height, width, channel); the flattened weight array is laid out
	/// as (kernel_h, kernel_w, input_channel, output_channel).
	/// </summary>
	public class Conv2D : Tensor
	{
		// Flattened kernel weights, layout (kernel_h, kernel_w, in_channel, out_channel).
		public double[] weights;

		// Whether a per-output-channel bias is added after the convolution.
		public bool use_bias;

		// Bias values, one per output channel (null when use_bias is false).
		public double[] bias;

		public int input_channel;

		public int output_channel;

		// Stride {height, width}.
		public int[] step;

		// Padding {height, width}; padded cells read padding_num.
		public int[] padding;

		// Kernel size {height, width}.
		public int[] kernel_size;

		private int weight_len;

		// Value substituted for input cells that fall outside the image.
		double padding_num;

		private bool back_inited = false;

		private double[] weight_gradient;

		private double[] bias_gradient;

		/// <summary>
		/// Creates a 2D convolution layer. Output height/width and batch-dependent
		/// buffers are resolved lazily on the first prediction.
		/// </summary>
		/// <param name="_in_channel">Number of input channels.</param>
		/// <param name="_out_channel">Number of output channels.</param>
		/// <param name="_kernel_size">Kernel size {height, width}.</param>
		/// <param name="_padding">Padding {height, width}; defaults to {0, 0}.</param>
		/// <param name="_step">Stride {height, width}; defaults to {1, 1}.</param>
		/// <param name="_padding_num">Value read for padded (out-of-image) cells.</param>
		/// <param name="has_bias">Whether to add a per-output-channel bias.</param>
		public Conv2D(int _in_channel, int _out_channel, int[] _kernel_size, int[] _padding = null, int[] _step = null, double _padding_num = 0, bool has_bias = true)
		{
			this.kind = TensorKinds.LayerConv2D;
			this.input_channel = _in_channel;
			this.output_channel = _out_channel;
			this.step = _step ?? new int[] { 1, 1 };
			this.padding = _padding ?? new int[] { 0, 0 };
			this.kernel_size = _kernel_size;
			this.weight_len = _kernel_size[0] * _kernel_size[1] * _in_channel * _out_channel;
			this.weights = new double[this.weight_len];
			// NOTE(review): per-instance time-seeded Random; consider a shared/seedable
			// source so two layers built back-to-back don't get identical weights.
			Random rdm = new Random();
			for (int i = 0; i < this.weight_len; i++)
			{
				// Uniform init in [-0.2, 0.2).
				this.weights[i] = rdm.NextDouble() * 0.4 - 0.2;
			}
			this.use_bias = has_bias;
			if (this.use_bias)
			{
				this.bias = new double[_out_channel];
			}
			this.dim = 4;
			this.shape = new int[] { -1, -1, -1, _out_channel }; // batch_size, height, width, channel
			this.value_len = -1;
			this.padding_num = _padding_num;
		}

		/// <summary>
		/// Forward pass: convolves <paramref name="input"/> with the kernel and
		/// writes the result to <c>outputs</c>; returns <c>this</c> so layers chain.
		/// In build-only mode (graph construction) it just wires the tensors together.
		/// </summary>
		public override Tensor LayerPred(Tensor input)
		{
			if (input == null)
			{
				this.output_tensor.Clear();
				return null;
			}
			if (input.kind == TensorKinds.TensorNormal || input.build_only)
			{
				// Graph-building phase: record the connection, no computation.
				build_only = true;
				input_tensor = input;
				input.output_tensor.Add(this);
				return this;
			}
			build_only = false;
			if (input.shape[3] != input_channel)
			{
				// BUGFIX: report the channel dimension (shape[3]) that was checked, not the width.
				throw new ArgumentException(string.Format("2D卷积层输入层和上层输出channel必须相同，{0}和{1}", input.shape[3], input_channel));
			}
			if (input.batch_size == 0)
			{
				throw new ArgumentException("batch size不能为0");
			}
			if (batch_size != input.batch_size)
			{
				// Batch size changed: output/gradient buffers must be re-sized.
				this.pred_init = false;
				this.backward_init = false;
			}
			if (!pred_init)
			{
				this.batch_size = input.batch_size;
				this.shape[0] = batch_size;
				this.pred_init = true;
				// out_size = (in_size - kernel_size + 1 + 2 * padding), divided by the
				// stride rounded up (the "+ step - 1" term).
				int height = (input.shape[1] - kernel_size[0] + 1 + padding[0] * 2 + step[0] - 1) / step[0];
				int weight = (input.shape[2] - kernel_size[1] + 1 + padding[1] * 2 + step[1] - 1) / step[1];
				if (height < 1)
				{
					height = 1;
				}
				if (weight < 1)
				{
					weight = 1;
				}
				shape[1] = height;
				shape[2] = weight;
				value_len = shape[1] * shape[2] * output_channel;
				this.outputs = new double[batch_size * value_len];
			}
			this.input_tensor = input;
			this.input_tensor.output_tensor.Add(this);

			int kernel_size_total = kernel_size[0] * kernel_size[1];
			int ouput_row_total = shape[2] * shape[3];             // output width * out_channel
			int input_row_total = input.shape[2] * input.shape[3]; // input width * in_channel
			int kernel_val = input_channel * output_channel;       // weights per kernel cell
			// One iteration per output value; iterations write disjoint outputs[i], so race-free.
			Parallel.For(0, this.batch_size * this.value_len, delegate (int i) // (batch, height, width, channel)
			{
				int batch_idx = i / this.value_len;
				int value_idx = i % this.value_len;
				int height_idx = value_idx / ouput_row_total;  // output height index
				int row_idx = value_idx % ouput_row_total;
				int width_idx = row_idx / output_channel;      // output width index
				int channel_idx = row_idx % output_channel;    // output channel index
				int ipt_w_idx_pre = width_idx * step[1] - padding[1];
				int ipt_h_idx_pre = height_idx * step[0] - padding[0];
				int in_batch_offset = batch_idx * input.value_len;
				double line_value = 0.0;
				for (int j = 0; j < kernel_size_total; j++) // implicit img2col over the kernel window
				{
					int ipt_w_idx = ipt_w_idx_pre + j % kernel_size[1];
					int ipt_h_idx = ipt_h_idx_pre + j / kernel_size[1];
					int input_idx_pre = in_batch_offset + ipt_h_idx * input_row_total + ipt_w_idx * input_channel;
					int weight_idx_pre = j * kernel_val + channel_idx;
					for (int k = 0; k < input_channel; k++)
					{
						// Cells outside the image read the configured padding value.
						double get_put_value = padding_num;
						if (ipt_w_idx >= 0 && ipt_w_idx < input.shape[2] && ipt_h_idx >= 0 && ipt_h_idx < input.shape[1])
						{
							get_put_value = input.outputs[input_idx_pre + k];
						}
						// Weight layout (kh, kw, in_c, out_c): the in-channel stride is output_channel.
						// BUGFIX: was "k * input_channel", which collides/overruns when in_c != out_c.
						int weight_idx = weight_idx_pre + k * output_channel;
						line_value += get_put_value * this.weights[weight_idx];
					}
				}
				if (this.use_bias)
				{
					line_value += this.bias[channel_idx];
				}
				this.outputs[i] = line_value;
			});
			return this;
		}

		/// <summary>
		/// Lock-free double accumulation; several Parallel.For iterations may
		/// add into the same weight-gradient slot concurrently.
		/// </summary>
		private static void AtomicAdd(ref double target, double value)
		{
			double seen;
			do
			{
				seen = target;
			} while (Interlocked.CompareExchange(ref target, seen + value, seen) != seen);
		}

		/// <summary>
		/// Backward pass: accumulates weight/bias gradients and propagates the
		/// gradient to the input tensor. Once the last downstream consumer has
		/// reported back, applies the optimizer and clears the accumulators.
		/// </summary>
		public override void Backward(Tensor back, double lr)
		{
			base.BackwordInit(null);
			if (!this.back_inited)
			{
				this.weight_gradient = new double[this.weight_len];
				if (use_bias)
				{
					this.bias_gradient = new double[output_channel];
				}
				this.back_inited = true;
			}

			int out_size_total = shape[1] * shape[2];
			int kernel_val = input_channel * output_channel;
			int kernel_size_total = kernel_size[0] * kernel_size[1];
			int col_batch_size = out_size_total * input_channel;
			int ouput_row_total = shape[2] * shape[3];
			int input_row_total = input_tensor.shape[2] * input_tensor.shape[3];
			// col2img: the outer kernel-offset loop stays serial because different
			// offsets can touch the same input cell; within one offset every iteration
			// maps to a distinct input cell, so input_tensor.gradient needs no locking.
			for (int kernel_size_idx = 0; kernel_size_idx < kernel_size_total; kernel_size_idx++)
			{
				int kernel_height_idx = kernel_size_idx / kernel_size[1];
				int kernel_width_idx = kernel_size_idx % kernel_size[1];
				int in_height_idx_offset = kernel_height_idx - padding[0];
				int in_width_idx_offset = kernel_width_idx - padding[1];
				Parallel.For(0, this.batch_size * col_batch_size, delegate (int out_value_idx) // batch * out_h * out_w * in_channel
				{
					int batch_idx = out_value_idx / col_batch_size;
					int col_value_idx = out_value_idx % col_batch_size;
					int out_size_idx = col_value_idx / input_channel;
					int ipt_channel_idx = col_value_idx % input_channel;
					int out_height_idx = out_size_idx / shape[2];
					int out_width_idx = out_size_idx % shape[2];
					int in_height_idx = out_height_idx * step[0] + in_height_idx_offset;
					int in_width_idx = out_width_idx * step[1] + in_width_idx_offset;
					if (in_height_idx < 0 || in_width_idx < 0 || in_height_idx >= input_tensor.shape[1] || in_width_idx >= input_tensor.shape[2])
					{
						return; // this kernel offset only ever saw padding here
					}
					double point_value = 0;
					int input_gradient_idx = batch_idx * input_tensor.value_len + in_height_idx * input_row_total + in_width_idx * input_channel + ipt_channel_idx;
					// Weight layout (kh, kw, in_c, out_c).
					// BUGFIX: in-channel stride is output_channel (was input_channel), matching the forward pass.
					int weight_idx_pre = kernel_size_idx * kernel_val + ipt_channel_idx * output_channel;
					// dL/dW contribution = input activation * upstream gradient, averaged over the batch.
					double weight_gradient_par = input_tensor.outputs[input_gradient_idx] / batch_size;
					for (int out_channel_idx = 0; out_channel_idx < output_channel; out_channel_idx++)
					{
						int weight_idx = weight_idx_pre + out_channel_idx;
						// Output gradient layout (b, h, w, out_c).
						// BUGFIX: width stride is shape[3] (out_channel), not shape[2] (width).
						int gradient_idx = batch_idx * value_len + out_height_idx * ouput_row_total + out_width_idx * shape[3] + out_channel_idx;
						point_value += weights[weight_idx] * gradient[gradient_idx];
						// BUGFIX: parallel iterations share weight slots -> accumulate atomically.
						AtomicAdd(ref this.weight_gradient[weight_idx], gradient[gradient_idx] * weight_gradient_par);
					}
					input_tensor.gradient[input_gradient_idx] += point_value;
				});
			}
			if (use_bias)
			{
				// Each parallel iteration owns one output channel, so no locking is needed.
				Parallel.For(0, output_channel, delegate (int out_cn_idx)
				{
					// BUGFIX: sum over every batch element (previously only the first).
					// gradient is (b, h, w, c); channel c occupies i * output_channel + c
					// for i in [0, batch_size * out_h * out_w).
					for (int i = 0; i < batch_size * out_size_total; i++)
					{
						this.bias_gradient[out_cn_idx] += this.gradient[i * output_channel + out_cn_idx] / batch_size;
					}
				});
			}

			// Continue propagating only after every downstream consumer reported its gradient.
			this.output_tensor.Remove(back);
			if (output_tensor.Count == 0)
			{
				// Apply the optimizer, recurse upstream, then reset accumulators.
				CommonDef.Opt.opt(this.weights, this.weight_gradient, lr);
				if (use_bias)
				{
					CommonDef.Opt.opt(this.bias, this.bias_gradient, lr);
				}
				this.input_tensor.Backward(this, lr);
				Array.Clear(this.gradient, 0, this.gradient.Length);
				Array.Clear(this.weight_gradient, 0, this.weight_gradient.Length);
				if (use_bias)
				{
					Array.Clear(this.bias_gradient, 0, this.bias_gradient.Length);
				}
			}
		}

		/// <summary>
		/// Dumps the kernel weights to the console: one (in_channel x out_channel)
		/// table per kernel cell, rounded to <paramref name="degree"/> decimals.
		/// </summary>
		public void PrintWeights(int degree = 2)
		{
			Console.WriteLine(String.Format("({0}, {1}, {2}, {3})", kernel_size[0], kernel_size[1], input_channel, output_channel));
			int kernel_val = input_channel * output_channel;
			int kernel_size_total = kernel_size[0] * kernel_size[1];
			for (int kernel_idx = 0; kernel_idx < kernel_size_total; kernel_idx++)
			{
				for (int in_c_idx = 0; in_c_idx < input_channel; in_c_idx++)
				{
					for (int out_c_idx = 0; out_c_idx < output_channel; out_c_idx++)
					{
						// BUGFIX: index with the (kh, kw, in_c, out_c) layout the forward
						// pass uses; the old formula read from a different, wrong layout.
						int weight_idx = kernel_idx * kernel_val + in_c_idx * output_channel + out_c_idx;
						Console.Write(Math.Round(weights[weight_idx], degree) + ", ");
					}
					Console.Write("\n");
				}
				Console.Write("\n");
			}
			Console.Write("\n");
		}

		/// <summary>
		/// Serializes this layer into <paramref name="save_msg"/> and continues the
		/// save walk upstream once every downstream consumer has visited this node.
		/// </summary>
		internal override void SaveBackward(Tensor back, List<string> save_msg, ref int layer_count)
		{
			this.input_tensor.tensor_id = layer_count++;
			// NOTE(review): KERNEL_SIZE/STEP/PADDING are int[] and format as "System.Int32[]";
			// StoreBackward compares them the same way, so the round trip stays consistent,
			// but the actual values are not persisted. "WEGIHTS" is a historical key typo
			// shared with StoreBackward — do not fix one side alone or saved models break.
			save_msg.Add(string.Format("ID:{0},KIND:{1},INPUT_ID:{2},IN_CHANNEL:{3},OUT_CHANNEL:{4},KERNEL_SIZE:{5},STEP:{6},PADDING:{7},PAD_NUM:{8},USE_BIAS:{9},WEGIHTS:{10},BIAS:{11}",
				new object[]
			{
				this.tensor_id,
				this.kind,
				this.input_tensor.tensor_id,
				input_channel,
				output_channel,
				kernel_size,
				step,
				padding,
				padding_num,
				this.use_bias,
				CommonDef.GetBase64(this.weights),
				// BUGFIX: bias is null when use_bias is false; avoid a NullReferenceException.
				this.use_bias ? CommonDef.GetBase64(this.bias) : ""
			}));
			this.output_tensor.Remove(back);
			if (this.output_tensor.Count == 0)
			{
				this.input_tensor.SaveBackward(this, save_msg, ref layer_count);
			}
		}

		/// <summary>
		/// Restores weights (and bias, if present) from the head record of
		/// <paramref name="save_msg"/>, validating the record against this layer.
		/// </summary>
		internal override void StoreBackward(Tensor back, List<string> save_msg, ref int layer_count)
		{
			Dictionary<string, string> map_items = CommonDef.GetSaveMsgDic(save_msg[0]);
			save_msg.RemoveAt(0);
			// NOTE(review): the KERNEL_SIZE check compares int[].ToString() values
			// ("System.Int32[]"), matching what SaveBackward wrote, so it can never
			// actually detect a kernel-size mismatch.
			if (map_items["ID"] != tensor_id.ToString()
				|| map_items["KIND"] != kind.ToString()
				|| map_items["IN_CHANNEL"] != input_channel.ToString()
				|| map_items["OUT_CHANNEL"] != output_channel.ToString()
				|| map_items["KERNEL_SIZE"] != kernel_size.ToString())
			{
				throw new ArgumentException(string.Format("装载失败{0}", tensor_id));
			}
			double[] store_weights = CommonDef.GetDataFromBase64(map_items["WEGIHTS"]);
			Array.Copy(store_weights, weights, weights.Length);
			if (use_bias)
			{
				double[] store_bias = CommonDef.GetDataFromBase64(map_items["BIAS"]);
				Array.Copy(store_bias, bias, bias.Length);
			}
			input_tensor.tensor_id = layer_count++;
			this.output_tensor.Remove(back);
			if (output_tensor.Count == 0)
			{
				this.input_tensor.StoreBackward(this, save_msg, ref layer_count);
			}
		}
	}
}
