﻿using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Threading.Tasks;

namespace ZGSharp
{
	/// <summary>
	/// Element-wise math operations (Pow, Log) as graph nodes, with gradient
	/// back-propagation via <see cref="Backward"/> and save/load support.
	/// </summary>
	public class TensorMath : Tensor
	{
		/// <summary>Scalar operand: the exponent for Pow, the logarithm base for Log.</summary>
		public double argv;

		/// <summary>
		/// Element-wise power: outputs[i] = obj.outputs[i] ^ _argv.
		/// Returns null when <paramref name="obj"/> is null. When the input is a
		/// placeholder (TensorNormal) or itself build-only, the node is linked into
		/// the graph without computing values.
		/// </summary>
		/// <param name="obj">Input tensor; may be null.</param>
		/// <param name="_argv">Exponent applied to every element.</param>
		/// <returns>The new Pow node, or null when obj is null.</returns>
		public static Tensor Pow(Tensor obj, double _argv)
		{
			if (obj == null)
			{
				return null;
			}
			TensorMath tensor = new TensorMath();
			tensor.kind = TensorKinds.LayerOperatorPow;
			// BUGFIX: record argv before the build-only branch; previously the
			// early return left argv == 0 on graph-built nodes, corrupting
			// SaveBackward output and any later Backward pass.
			tensor.argv = _argv;
			if (obj.kind == TensorKinds.TensorNormal || obj.build_only)
			{
				tensor.build_only = true;
				tensor.input_tensor = obj;
				obj.output_tensor.Add(tensor);
				return tensor;
			}
			tensor.build_only = false;
			tensor.Construct(obj.shape);
			tensor.PredInit(obj);
			Parallel.For(0, tensor.batch_size * tensor.value_len, delegate (int i)
			{
				tensor.outputs[i] = Math.Pow(obj.outputs[i], _argv);
			});
			return tensor;
		}

		/// <summary>
		/// Element-wise logarithm: outputs[i] = log base <paramref name="_argv"/>
		/// of obj.outputs[i] (natural log by default). Results that are NaN or
		/// below -200 are clamped to -200 to guard against zero / tiny
		/// probability inputs. Returns null when <paramref name="obj"/> is null.
		/// </summary>
		/// <param name="obj">Input tensor; may be null.</param>
		/// <param name="_argv">Logarithm base; defaults to e.</param>
		/// <returns>The new Log node, or null when obj is null.</returns>
		public static Tensor Log(Tensor obj, double _argv = Math.E)
		{
			if (obj == null)
			{
				return null;
			}
			TensorMath tensor = new TensorMath();
			tensor.kind = TensorKinds.LayerOperatorLog;
			// BUGFIX: record argv before the build-only branch (see Pow).
			tensor.argv = _argv;
			if (obj.kind == TensorKinds.TensorNormal || obj.build_only)
			{
				tensor.build_only = true;
				tensor.input_tensor = obj;
				obj.output_tensor.Add(tensor);
				return tensor;
			}
			tensor.build_only = false;
			tensor.Construct(obj.shape);
			tensor.PredInit(obj);
			Parallel.For(0, tensor.batch_size * tensor.value_len, delegate (int i)
			{
				tensor.outputs[i] = Math.Log(obj.outputs[i], _argv);
				// Clamp NaN / extremely negative values (log of 0 or a tiny
				// probability) so downstream loss math stays finite.
				if (double.IsNaN(tensor.outputs[i]) || tensor.outputs[i] < -200)
				{
					tensor.outputs[i] = -200;
				}
			});
			return tensor;
		}

		/// <summary>
		/// Back-propagates this node's gradient into the input tensor,
		/// accumulating (+=) so gradients from multiple consumers sum. Once all
		/// registered consumers have called back, recurses into the input and
		/// clears this node's gradient buffer.
		/// </summary>
		/// <param name="back">The downstream consumer reporting back.</param>
		/// <param name="lr">Learning rate, forwarded to the input's Backward.</param>
		/// <exception cref="ArgumentException">
		/// If a NaN gradient is produced in the Log case, or the node kind is unsupported.
		/// </exception>
		public override void Backward(Tensor back, double lr)
		{
			base.BackwordInit(null);
			switch (kind)
			{
				case TensorKinds.LayerOperatorPow:
					Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
					{
						// d(x^a)/dx = a * x^(a-1).
						// BUGFIX: previous code multiplied by x itself
						// (argv * x), which is only correct when argv == 2.
						this.input_tensor.gradient[i] += this.gradient[i] * this.argv * Math.Pow(this.input_tensor.outputs[i], this.argv - 1.0);
					});
					break;
				case TensorKinds.LayerOperatorLog:
					// d(log_b(x))/dx = 1 / (x * ln(b)); ln(b) is loop-invariant,
					// compute it once outside the parallel loop.
					double ln_base = Math.Log(argv);
					Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
					{
						this.input_tensor.gradient[i] += this.gradient[i] / (input_tensor.outputs[i] * ln_base);
						if (double.IsNaN(input_tensor.gradient[i]))
						{
							throw new ArgumentException("is NaN value in Log");
						}
					});
					break;
				default:
					// Unreachable `break` after throw removed (CS0162).
					throw new ArgumentException("TensorMath不支持的反向传播操作：" + this.kind.ToString());
			}
			this.output_tensor.Remove(back);
			// Only propagate further once every consumer has reported back.
			if (this.output_tensor.Count == 0)
			{
				this.input_tensor.Backward(this, lr);
				Array.Clear(this.gradient, 0, this.gradient.Length);
			}
		}

		/// <summary>
		/// Serializes this node (id, kind, input id, argv) into
		/// <paramref name="save_msg"/>, assigning the input a fresh layer id,
		/// then recurses into the input once all consumers have reported back.
		/// </summary>
		internal override void SaveBackward(Tensor back, List<string> save_msg, ref int layer_count)
		{
			input_tensor.tensor_id = layer_count++;
			save_msg.Add(string.Format("ID:{0},KIND:{1},INPUT_ID:{2},ARGV:{3}", new object[]
			{
				this.tensor_id,
				this.kind,
				this.input_tensor.tensor_id,
				this.argv
			}));
			this.output_tensor.Remove(back);
			if (this.output_tensor.Count == 0)
			{
				this.input_tensor.SaveBackward(this, save_msg, ref layer_count);
			}
		}

		/// <summary>
		/// Consumes the next serialized record from <paramref name="save_msg"/>,
		/// validating that it matches this node's id and kind, then recurses into
		/// the input once all consumers have reported back.
		/// </summary>
		/// <exception cref="ArgumentException">If the record does not match this node.</exception>
		internal override void StoreBackward(Tensor back, List<string> save_msg, ref int layer_count)
		{
			Dictionary<string, string> map_items = CommonDef.GetSaveMsgDic(save_msg[0]);
			save_msg.RemoveAt(0);
			if (map_items["ID"] != tensor_id.ToString()
				|| map_items["KIND"] != kind.ToString())
			{
				throw new ArgumentException(string.Format("装载失败{0}", tensor_id));
			}
			input_tensor.tensor_id = layer_count++;
			this.output_tensor.Remove(back);
			if (output_tensor.Count == 0)
			{
				this.input_tensor.StoreBackward(this, save_msg, ref layer_count);
			}
		}
	}
}
