﻿using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Threading.Tasks;

namespace ZGSharp.Layers
{
	/// <summary>
	/// Fully-connected (dense) layer: output = input · W (+ bias).
	/// Weights are stored as a flattened (input_size, output_size) matrix.
	/// Supports deferred construction: pass input_size == -1 and the weight
	/// matrix is allocated on the first real forward pass, once the input
	/// width is known.
	/// </summary>
	public class Linear : Tensor
	{
		// Flattened (input_size, output_size) weight matrix.
		public double[] weights;

		public bool use_bias;

		// Per-output-unit bias; stays null when use_bias is false.
		public double[] bias;

		// Inputs per sample; -1 until inferred from the first forward pass.
		public int input_size;

		private int weight_len;

		// Gradient buffers are allocated lazily on the first Backward call.
		private bool back_inited = false;

		private double[] weight_gradient;

		private double[] bias_gradient;

		/// <summary>
		/// Creates a linear layer with <paramref name="output_size"/> units.
		/// Pass _input_size == -1 to defer weight allocation until the input
		/// size is known (completed inside <see cref="LayerPred"/>).
		/// </summary>
		public Linear(int _input_size, int output_size, bool has_bias = true)
		{
			this.kind = TensorKinds.LayerLinear;
			this.input_size = _input_size;
			this.use_bias = has_bias;
			if (has_bias)
			{
				this.bias = new double[output_size];
			}
			if (_input_size == -1)
			{
				// Input size unknown yet: stash the output size in shape[2]
				// and finish construction lazily in LayerPred.
				shape = new int[] { -1, -1, output_size };
				return;
			}
			InitWeights(output_size);
		}

		// Allocates the weight matrix, initializes it uniformly in
		// [-0.2, 0.2), and constructs the output shape (-1, output_size).
		// Shared by the constructor and the deferred path in LayerPred.
		private void InitWeights(int output_size)
		{
			this.weight_len = output_size * this.input_size;
			this.weights = new double[this.weight_len];
			Random rdm = new Random();
			for (int i = 0; i < this.weight_len; i++)
			{
				this.weights[i] = rdm.NextDouble() * 0.4 - 0.2;
			}
			base.Construct(new int[]
			{
				-1,
				output_size
			});
		}

		/// <summary>
		/// Forward pass: y = x·W (+ b). During graph construction
		/// (TensorNormal input or build_only) it only wires the tensor graph.
		/// Returns this layer, or null when <paramref name="input"/> is null.
		/// </summary>
		public override Tensor LayerPred(Tensor input)
		{
			if (input == null)
			{
				this.output_tensor.Clear();
				return null;
			}
			if (input.kind == TensorKinds.TensorNormal || input.build_only)
			{
				// Build-only pass: just record the graph connection.
				build_only = true;
				input_tensor = input;
				input.output_tensor.Add(this);
				return this;
			}
			build_only = false;
			if (input_size == -1)
			{
				// Deferred construction: the input width is known now;
				// the output size was stashed in shape[2] by the constructor.
				input_size = input.value_len;
				InitWeights(shape[2]);
			}
			if (input.value_len != input_size)
			{
				throw new ArgumentException(string.Format("全连接层输入层和上层输出数量必须相同，{0}和{1}", this.input_size, input.value_len));
			}
			base.PredInit(input);
			// One task per (batch sample, output unit) pair; each task writes
			// a distinct outputs[i], so the parallel loop is race-free.
			Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
			{
				int batch_idx = i / this.value_len;
				int value_idx = i % this.value_len;
				double line_value = 0.0;
				// weights laid out as (input_size, value_len)
				for (int j = 0; j < this.input_size; j++)
				{
					line_value += input.outputs[batch_idx * this.input_size + j] * this.weights[j * this.value_len + value_idx];
				}
				if (this.use_bias)
				{
					line_value += this.bias[value_idx];
				}
				if (double.IsNaN(line_value))
				{
					throw new ArgumentException("is NaN value in linear");
				}
				this.outputs[i] = line_value;
			});
			return this;
		}

		/// <summary>
		/// Backward pass: accumulates weight/bias gradients (averaged over the
		/// batch), propagates the gradient to the input tensor, and — once all
		/// downstream consumers have reported back — applies the optimizer step
		/// and resets the accumulators.
		/// </summary>
		public override void Backward(Tensor back, double lr)
		{
			base.BackwordInit(null);
			if (!this.back_inited)
			{
				// First backward call: allocate gradient accumulators.
				this.weight_gradient = new double[this.weight_len];
				if (this.use_bias)
				{
					this.bias_gradient = new double[this.value_len];
				}
				this.back_inited = true;
			}
			// Weight and bias gradients are shared across the batch, so the
			// batch loop must stay sequential; parallelizing over input
			// indices is safe because each task touches a disjoint slice of
			// weight_gradient and input_tensor.gradient.
			for (int batch_idx = 0; batch_idx < batch_size; batch_idx++)
			{
				Parallel.For(0, this.input_size, delegate (int input_idx)//(inputsize, valuesize)
				{
					int global_ipt_idx = batch_idx * this.input_size + input_idx;
					double in_gradient = 0;
					for (int value_idx = 0; value_idx < this.value_len; value_idx++)
					{
						//(batch, value_len, input_size)
						int weight_idx = input_idx * value_len + value_idx;
						int grad_index = batch_idx * value_len + value_idx;
						in_gradient += this.gradient[grad_index] * this.weights[weight_idx];
						// Average the weight gradient over the batch.
						this.weight_gradient[weight_idx] += this.gradient[grad_index] * this.input_tensor.outputs[global_ipt_idx] / batch_size;
					}
					input_tensor.gradient[global_ipt_idx] += in_gradient;
					if (double.IsNaN(input_tensor.gradient[global_ipt_idx]))
					{
						throw new ArgumentException("is NaN value in linear");
					}
				});
				if (use_bias)
				{
					Parallel.For(0, this.value_len, delegate (int value_idx)
					{
						this.bias_gradient[value_idx] += this.gradient[batch_idx * value_len + value_idx] / batch_size;
					});
				}
			}
			// Continue propagating only once every downstream consumer has
			// contributed its gradient (output_tensor drains to empty).
			this.output_tensor.Remove(back);
			if (output_tensor.Count == 0)
			{
				// Apply the optimizer step, propagate, then reset accumulators.
				CommonDef.Opt.opt(this.weights, this.weight_gradient, lr);
				if (use_bias)
				{
					CommonDef.Opt.opt(this.bias, this.bias_gradient, lr);
				}
				this.input_tensor.Backward(this, lr);
				Array.Clear(this.gradient, 0, this.gradient.Length);
				Array.Clear(this.weight_gradient, 0, this.weight_gradient.Length);
				if (use_bias)
				{
					Array.Clear(this.bias_gradient, 0, this.bias_gradient.Length);
				}
			}
		}

		/// <summary>
		/// Dumps the weight matrix to the console as an
		/// (input_size, value_len) grid, rounded to <paramref name="degree"/>
		/// decimal places. Debug helper only.
		/// </summary>
		public void PrintWeights(int degree = 2)
		{
			Console.WriteLine(String.Format("({0}, {1})", input_size, value_len));
			for (int input_idx = 0; input_idx < input_size; input_idx++)
			{
				for (int value_idx = 0; value_idx < value_len; value_idx++)
				{
					Console.Write(Math.Round(weights[input_idx * value_len + value_idx], degree) + ", ");
				}
				Console.Write("\n");
			}
			Console.Write("\n");
		}

		/// <summary>
		/// Serializes this layer into <paramref name="save_msg"/> and, once
		/// all consumers have reported back, recurses into the input tensor.
		/// Assigns the next layer id to the input tensor as a side effect.
		/// </summary>
		internal override void SaveBackward(Tensor back, List<string> save_msg, ref int layer_count)
		{
			this.input_tensor.tensor_id = layer_count++;
			// bias is null when use_bias is false — serialize an empty payload
			// instead of dereferencing it (StoreBackward never reads BIAS in
			// that case, so the format stays compatible).
			string bias_payload = this.use_bias ? CommonDef.GetBase64(this.bias) : "";
			// NOTE: "WEGIHTS" is a historical typo kept intentionally so that
			// existing save files keep loading.
			save_msg.Add(string.Format("ID:{0},KIND:{1},INPUT_ID:{2},IN_SIZE:{3},OUT_SIZE:{4},USE_BIAS:{5},WEGIHTS:{6},BIAS:{7}", new object[]
			{
				this.tensor_id,
				this.kind,
				this.input_tensor.tensor_id,
				input_size,
				this.value_len,
				this.use_bias,
				CommonDef.GetBase64(this.weights),
				bias_payload
			}));
			this.output_tensor.Remove(back);
			if (this.output_tensor.Count == 0)
			{
				this.input_tensor.SaveBackward(this, save_msg, ref layer_count);
			}
		}

		/// <summary>
		/// Restores weights/bias from the head of <paramref name="save_msg"/>,
		/// validating that the stored metadata matches this layer's layout.
		/// Mirrors <see cref="SaveBackward"/>'s traversal order.
		/// </summary>
		/// <exception cref="ArgumentException">Stored metadata does not match this layer.</exception>
		internal override void StoreBackward(Tensor back, List<string> save_msg, ref int layer_count)
		{
			Dictionary<string, string> map_items = CommonDef.GetSaveMsgDic(save_msg[0]);
			save_msg.RemoveAt(0);
			if (map_items["ID"] != tensor_id.ToString()
				|| map_items["KIND"] != kind.ToString()
				|| map_items["IN_SIZE"] != input_size.ToString()
				|| map_items["OUT_SIZE"] != value_len.ToString()
				|| map_items["USE_BIAS"] != use_bias.ToString())
			{
				throw new ArgumentException(string.Format("装载失败{0}", tensor_id));
			}
			// "WEGIHTS" typo matches the key written by SaveBackward.
			double[] store_weights = CommonDef.GetDataFromBase64(map_items["WEGIHTS"]);
			Array.Copy(store_weights, weights, weights.Length);
			if (use_bias)
			{
				double[] store_bias = CommonDef.GetDataFromBase64(map_items["BIAS"]);
				Array.Copy(store_bias, bias, bias.Length);
			}
			input_tensor.tensor_id = layer_count++;
			this.output_tensor.Remove(back);
			if (output_tensor.Count == 0)
			{
				this.input_tensor.StoreBackward(this, save_msg, ref layer_count);
			}
		}
	}
}
