﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using ZGSharp.Layers;

namespace ZGSharp
{
	/// <summary>
	/// Base node of the computation graph: stores a batched forward-value buffer
	/// (<see cref="outputs"/>), its gradient, and links to the producing tensor
	/// (<see cref="input_tensor"/>) and consuming tensors (<see cref="output_tensor"/>).
	/// Concrete layers override <see cref="LayerPred"/> and <see cref="Backward"/>.
	/// </summary>
	public class Tensor
	{
		// Number of dimensions in shape (including the batch dimension).
		public int dim;

		// Full shape; shape[0] is the batch size, the rest are per-sample dims.
		public int[] shape;

		// Values per sample: product of shape[1..dim-1].
		public int value_len;

		// Current batch size (mirrors shape[0]).
		public int batch_size;

		// True once outputs has been allocated for the current batch size.
		public bool pred_init = false;

		// True once gradient has been allocated for the current batch size.
		protected bool backward_init = false;

		// Upstream tensor feeding this node (null for graph roots).
		public Tensor input_tensor = null;

		// Downstream tensors consuming this node's output.
		public List<Tensor> output_tensor = new List<Tensor>();

		// Forward values, row-major, length batch_size * value_len.
		public double[] outputs;

		// Accumulated gradients, same layout as outputs.
		public double[] gradient;

		public TensorKinds kind = TensorKinds.TensorNormal;

		// Identifier assigned while walking the graph during save/load.
		public int tensor_id;

		public bool build_only = false; // build the graph only, skip computation

		/// <summary>Wraps a flat array as a constant tensor of the given shape.</summary>
		/// <param name="_shape">Full shape; _shape[0] is the batch size.</param>
		/// <param name="array">Row-major values; length must equal the product of _shape.</param>
		/// <exception cref="ArgumentException">Null/empty arguments or length/shape mismatch.</exception>
		public static Tensor ToTensor(int[] _shape, double[] array)
		{
			if (_shape == null || _shape.Length == 0 || array == null)
			{
				throw new ArgumentException("tensor长度和形状不匹配");
			}
			Tensor tensor = new Tensor();
			tensor.kind = TensorKinds.TensorConst;
			tensor.batch_size = _shape[0];
			tensor.Construct(_shape); // sets dim/shape/value_len
			if (array.Length != tensor.batch_size * tensor.value_len)
			{
				throw new ArgumentException("tensor长度和形状不匹配");
			}
			tensor.outputs = new double[tensor.batch_size * tensor.value_len];
			tensor.pred_init = true;
			// Plain memory copy; Parallel.For only adds scheduling overhead here.
			Array.Copy(array, tensor.outputs, array.Length);
			return tensor;
		}

		/// <summary>
		/// Initializes dim/shape/value_len from _shape. Ignores batch_size
		/// (value_len is the product of every dim except the first). Suits most
		/// operators; some operators implement their own construction.
		/// </summary>
		public void Construct(int[] _shape)
		{
			this.dim = _shape.Length;
			this.shape = new int[this.dim];
			Array.Copy(_shape, shape, dim);
			this.value_len = 1;
			for (int i = 1; i < this.dim; i++)
			{
				this.value_len *= this.shape[i];
			}
		}

		/// <summary>
		/// Allocates the forward buffer for the incoming batch and wires this node
		/// into the graph (needs the input's batch_size). Suits most operators;
		/// some operators implement their own initialization.
		/// </summary>
		/// <exception cref="ArgumentException">Input batch size is zero.</exception>
		protected void PredInit(Tensor _input_tensor)
		{
			if (_input_tensor.batch_size == 0)
			{
				throw new ArgumentException("batch size不能为0");
			}
			// A changed batch size invalidates both buffers; force reallocation.
			if (batch_size != _input_tensor.batch_size)
			{
				this.pred_init = false;
				this.backward_init = false;
			}
			if (!pred_init)
			{
				this.batch_size = _input_tensor.batch_size;
				this.shape[0] = this.batch_size;
				this.outputs = new double[this.batch_size * this.value_len];
				this.pred_init = true;
			}
			this.input_tensor = _input_tensor;
			this.input_tensor.output_tensor.Add(this);
		}

		/// <summary>Index of the largest value within sample i of the batch.</summary>
		public int GetMaxIndexInBatch(int i)
		{
			int max_index = 0;
			for (int j = 1; j < value_len; j++)
			{
				if (outputs[i * value_len + j] > outputs[i * value_len + max_index])
				{
					max_index = j;
				}
			}
			return max_index;
		}

		/// <summary>Sum of all forward values across the whole batch.</summary>
		public double Sum()
		{
			double sum = 0.0;
			foreach (double db in this.outputs)
			{
				sum += db;
			}
			return sum;
		}

		/// <summary>Mean of all forward values across the whole batch.</summary>
		public double Average()
		{
			return Sum() / outputs.Length;
		}

		// Misspelled legacy name kept for backward compatibility; prefer Average().
		public double Avarage()
		{
			return Average();
		}

		/// <summary>Largest forward value across the whole batch.</summary>
		internal double Maximum()
		{
			double max = outputs[0];
			foreach (double db in this.outputs)
			{
				if (db > max)
				{
					max = db;
				}
			}
			return max;
		}

		// Misspelled legacy name kept for backward compatibility; prefer Maximum().
		internal double Maxinum()
		{
			return Maximum();
		}

		/// <summary>
		/// Prepares gradient buffers for a backward pass. A node with no consumers
		/// (the final loss) seeds its gradient from its own outputs; the upstream
		/// tensor gets a zeroed gradient buffer allocated on first visit.
		/// </summary>
		public void BackwordInit(Tensor _input_tensor = null)
		{
			if (this.output_tensor.Count == 0) // no consumers: this is the loss node
			{
				this.gradient = new double[this.batch_size * this.value_len];
				// Straight copy; Parallel.For was pure overhead for this.
				Array.Copy(this.outputs, this.gradient, this.batch_size * this.value_len);
			}
			Tensor input = _input_tensor;
			if (input == null)
			{
				input = this.input_tensor;
			}
			if (input != null && !input.backward_init)
			{
				// new double[] is already zero-initialized; no Array.Clear needed.
				input.gradient = new double[input.batch_size * input.value_len];
				input.backward_init = true;
			}
		}

		/// <summary>Forward pass; overridden by concrete layers. Base returns null.</summary>
		public virtual Tensor LayerPred(Tensor input)
		{
			return null;
		}

		/// <summary>Backward pass; overridden by concrete layers. Base is a no-op.</summary>
		public virtual void Backward(Tensor back, double lr)
		{
		}

		/// <summary>
		/// Serializes this node while walking the graph backwards from the output:
		/// assigns ids on the way and recurses into the input only after every
		/// consumer has reported in (output_tensor drained to empty).
		/// </summary>
		internal virtual void SaveBackward(Tensor back, List<string> save_msg, ref int layer_count)
		{
			if (this.input_tensor != null)
			{
				input_tensor.tensor_id = layer_count++;
				if (this.kind == TensorKinds.TensorConst)
				{
					// Constants embed their values (base64) so they can be restored.
					save_msg.Add(string.Format("ID:{0},KIND:{1},INPUT_ID:{2},OUT_SIZE:{3},VALUE:{4}", new object[]
					{
						this.tensor_id,
						this.kind,
						this.input_tensor.tensor_id,
						this.value_len,
						CommonDef.GetBase64(this.outputs)
					}));
				}
				else
				{
					save_msg.Add(string.Format("ID:{0},KIND:{1},INPUT_ID:{2}", this.tensor_id, this.kind, this.input_tensor.tensor_id));
				}
				this.output_tensor.Remove(back);
				if (this.output_tensor.Count == 0)
				{
					this.input_tensor.SaveBackward(this, save_msg, ref layer_count);
				}
			}
			else
			{
				save_msg.Add(string.Format("ID:{0},KIND:{1}\n", this.tensor_id, this.kind));
			}
		}

		/// <summary>
		/// Restores graph state from save_msg in the same backward order that
		/// SaveBackward wrote it, consuming one record per node and validating
		/// id/kind against the current graph.
		/// </summary>
		/// <exception cref="ArgumentException">Record does not match this node.</exception>
		internal virtual void StoreBackward(Tensor back, List<string> save_msg, ref int layer_count)
		{
			if (input_tensor == null) // reached a root: store finished
			{
				return;
			}
			Dictionary<string, string> map_items = CommonDef.GetSaveMsgDic(save_msg[0]);
			save_msg.RemoveAt(0);
			if (map_items["ID"] != tensor_id.ToString() || map_items["KIND"] != kind.ToString())
			{
				throw new ArgumentException("装载失败");
			}
			input_tensor.tensor_id = layer_count++;
			this.output_tensor.Remove(back);
			if (output_tensor.Count == 0)
			{
				this.input_tensor.StoreBackward(this, save_msg, ref layer_count);
			}
		}

		public static Tensor operator +(Tensor obj1, Tensor obj2)
		{
			Operator op = new Operator();
			return op.LayerAdd(obj1, obj2);
		}

		public static Tensor operator +(Tensor obj1, double obj2)
		{
			Operator op = new Operator();
			return op.LayerAdd(obj1, obj2);
		}

		public static Tensor operator +(double obj1, Tensor obj2)
		{
			Operator op = new Operator();
			return op.LayerAdd(obj2, obj1); // addition commutes
		}

		public static Tensor operator -(Tensor obj1, Tensor obj2)
		{
			Operator op = new Operator();
			return op.LayerMinus(obj1, obj2);
		}

		public static Tensor operator -(Tensor obj1, double obj2)
		{
			Operator op = new Operator();
			return op.LayerAdd(obj1, obj2 * -1.0); // t - c == t + (-c)
		}

		public static Tensor operator -(double obj1, Tensor obj2)
		{
			Operator op = new Operator();
			return op.LayerMinus(obj1, obj2);
		}

		public static Tensor operator -(Tensor obj1)
		{
			Operator op = new Operator();
			return op.LayerMinus(0.0, obj1); // -t == 0 - t
		}

		public static Tensor operator *(Tensor obj1, Tensor obj2)
		{
			Operator op = new Operator();
			return op.LayerMultiply(obj1, obj2);
		}

		public static Tensor operator *(Tensor obj1, double obj2)
		{
			Operator op = new Operator();
			return op.LayerMultiply(obj1, obj2);
		}

		public static Tensor operator *(double obj1, Tensor obj2)
		{
			Operator op = new Operator();
			return op.LayerMultiply(obj2, obj1); // multiplication commutes
		}

		public static Tensor operator /(Tensor obj1, Tensor obj2)
		{
			Operator op = new Operator();
			return op.LayerDivide(obj1, obj2);
		}

		public static Tensor operator /(Tensor obj1, double obj2)
		{
			if (obj2 == 0.0)
			{
				throw new ArgumentException("分母不能为0");
			}
			Operator op = new Operator();
			return op.LayerMultiply(obj1, 1.0 / obj2); // t / c == t * (1/c)
		}

		public static Tensor operator /(double obj1, Tensor obj2)
		{
			Operator op = new Operator();
			return op.LayerDivide(obj1, obj2);
		}

		/// <summary>Shape formatted as "(d0,d1,...)".</summary>
		public string PrintShape()
		{
			return "(" + string.Join(",", this.shape.Take(this.dim)) + ")";
		}

		// Shared pretty-printer: shape header, then one console line per innermost
		// row, with every value rounded to <degree> decimals.
		private void PrintBuffer(double[] data, int degree)
		{
			Console.Write(PrintShape());
			int lastDim = shape[dim - 1];
			for (int i = 0; i < batch_size * value_len; i++)
			{
				if (i % lastDim == 0)
				{
					Console.Write("\n");
				}
				Console.Write(Math.Round(data[i], degree) + ", ");
			}
			Console.Write("\n");
		}

		/// <summary>Dumps the gradient buffer to the console.</summary>
		public void PrintGradient(int degree = 2)
		{
			PrintBuffer(gradient, degree);
		}

		/// <summary>Dumps the forward-value buffer to the console.</summary>
		public void PrintValue(int degree = 2)
		{
			PrintBuffer(outputs, degree);
		}
	}
}
