﻿using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Threading.Tasks;

namespace ZGSharp.Layers
{
	/// <summary>
	/// Element-wise binary operator node (add / minus / multiply / divide) in the
	/// computation graph. The first operand is stored in the inherited
	/// <c>input_tensor</c>; the second in <see cref="input_tensor2"/>.
	/// </summary>
	public class Operator : Tensor
	{
		/// <summary>Second operand of the binary operation.</summary>
		public Tensor input_tensor2;

		/// <summary>
		/// Builds a constant tensor with the same shape and batch size as
		/// <paramref name="like"/>, every element set to <paramref name="const_db"/>.
		/// </summary>
		private static Tensor MakeConstantLike(Tensor like, double const_db)
		{
			double[] objdb = new double[like.value_len * like.batch_size];
			Array.Fill<double>(objdb, const_db);
			int[] objdb_shape = new int[like.dim];
			Array.Copy(like.shape, objdb_shape, like.dim);
			return Tensor.ToTensor(objdb_shape, objdb);
		}

		/// <summary>
		/// Validates that both operands have the same dimension count and shape.
		/// </summary>
		/// <param name="op_name">Operation name (e.g. "相加") spliced into the error
		/// text; the resulting messages are identical to the original ones.</param>
		/// <exception cref="ArgumentException">Dimensions or shapes differ.</exception>
		private static void ValidateSameShape(Tensor obj1, Tensor obj2, string op_name)
		{
			if (obj1.dim != obj2.dim)
			{
				throw new ArgumentException(string.Format("张量" + op_name + "必须使用相同维度{0}和{1}", obj1.dim, obj2.dim));
			}
			for (int j = 0; j < obj1.dim; j++)
			{
				if (obj1.shape[j] != obj2.shape[j])
				{
					throw new ArgumentException(string.Format("张量" + op_name + "必须使用相同形状{0}和{1}", obj1.PrintShape(), obj2.PrintShape()));
				}
			}
		}

		/// <summary>
		/// Shared setup for all element-wise binary operators: records the operator
		/// kind, handles the graph-build-only path, validates shapes, wires the operand
		/// links and allocates this node's output buffer.
		/// </summary>
		/// <returns><c>true</c> when the element-wise computation should run now;
		/// <c>false</c> when the node was only linked into the graph (build-only).</returns>
		private bool PrepareBinaryOp(Tensor obj1, Tensor obj2, TensorKinds op_kind, string op_name)
		{
			this.kind = op_kind;
			// Build-only path: either operand is a plain placeholder (TensorNormal) or
			// itself build-only, so just link the graph without evaluating.
			if (obj1.kind == TensorKinds.TensorNormal || obj1.build_only || obj2.kind == TensorKinds.TensorNormal || obj2.build_only)
			{
				build_only = true;
				input_tensor = obj1;
				input_tensor2 = obj2;
				obj1.output_tensor.Add(this);
				obj2.output_tensor.Add(this);
				return false;
			}
			build_only = false;
			ValidateSameShape(obj1, obj2, op_name);
			this.input_tensor = obj1;
			this.input_tensor2 = obj2;
			base.Construct(obj1.shape);
			// NOTE(review): only obj2 is added to output_tensor explicitly here, as in
			// the original; presumably PredInit(obj1) registers this node on obj1 —
			// TODO confirm against Tensor.PredInit.
			base.PredInit(obj1);
			obj2.output_tensor.Add(this);
			return true;
		}

		/// <summary>
		/// Element-wise addition of a tensor and a scalar constant: obj1 + const_db.
		/// </summary>
		/// <returns>This node, or <c>null</c> when <paramref name="obj1"/> is null
		/// (downstream links are cleared in that case).</returns>
		public Tensor LayerAdd(Tensor obj1, double const_db)
		{
			if (obj1 == null)
			{
				this.output_tensor.Clear();
				return null;
			}
			return this.LayerAdd(obj1, MakeConstantLike(obj1, const_db));
		}

		/// <summary>
		/// Element-wise addition of two tensors of identical shape: obj1 + obj2.
		/// </summary>
		/// <returns>This node, or <c>null</c> when either operand is null.</returns>
		/// <exception cref="ArgumentException">Operand dimensions or shapes differ.</exception>
		public Tensor LayerAdd(Tensor obj1, Tensor obj2)
		{
			if (obj1 == null || obj2 == null)
			{
				this.output_tensor.Clear();
				return null;
			}
			if (!PrepareBinaryOp(obj1, obj2, TensorKinds.LayerOperatorAdd, "相加"))
			{
				return this;
			}
			Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
			{
				this.outputs[i] = obj1.outputs[i] + obj2.outputs[i];
			});
			return this;
		}

		/// <summary>
		/// Element-wise subtraction with a scalar left operand: const_db - obj2.
		/// </summary>
		/// <returns>This node, or <c>null</c> when <paramref name="obj2"/> is null.</returns>
		internal Tensor LayerMinus(double const_db, Tensor obj2)
		{
			if (obj2 == null)
			{
				this.output_tensor.Clear();
				return null;
			}
			// const_db is the LEFT operand: result = const - obj2.
			return this.LayerMinus(MakeConstantLike(obj2, const_db), obj2);
		}

		/// <summary>
		/// Element-wise subtraction of two tensors of identical shape: obj1 - obj2.
		/// </summary>
		/// <returns>This node, or <c>null</c> when either operand is null.</returns>
		/// <exception cref="ArgumentException">Operand dimensions or shapes differ.</exception>
		internal Tensor LayerMinus(Tensor obj1, Tensor obj2)
		{
			if (obj1 == null || obj2 == null)
			{
				this.output_tensor.Clear();
				return null;
			}
			if (!PrepareBinaryOp(obj1, obj2, TensorKinds.LayerOperatorMinus, "相减"))
			{
				return this;
			}
			Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
			{
				this.outputs[i] = obj1.outputs[i] - obj2.outputs[i];
			});
			return this;
		}

		/// <summary>
		/// Element-wise multiplication of a tensor by a scalar constant: obj1 * const_db.
		/// </summary>
		/// <returns>This node, or <c>null</c> when <paramref name="obj1"/> is null.</returns>
		public Tensor LayerMultiply(Tensor obj1, double const_db)
		{
			if (obj1 == null)
			{
				this.output_tensor.Clear();
				return null;
			}
			return this.LayerMultiply(obj1, MakeConstantLike(obj1, const_db));
		}

		/// <summary>
		/// Element-wise (Hadamard) product of two tensors of identical shape.
		/// </summary>
		/// <returns>This node, or <c>null</c> when either operand is null.</returns>
		/// <exception cref="ArgumentException">Operand dimensions or shapes differ.</exception>
		internal Tensor LayerMultiply(Tensor obj1, Tensor obj2)
		{
			if (obj1 == null || obj2 == null)
			{
				this.output_tensor.Clear();
				return null;
			}
			if (!PrepareBinaryOp(obj1, obj2, TensorKinds.LayerOperatorMultiply, "相乘"))
			{
				return this;
			}
			Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
			{
				this.outputs[i] = obj1.outputs[i] * obj2.outputs[i];
			});
			return this;
		}

		/// <summary>
		/// Element-wise division with a scalar numerator: const_db / obj2.
		/// </summary>
		/// <returns>This node, or <c>null</c> when <paramref name="obj2"/> is null.</returns>
		internal Tensor LayerDivide(double const_db, Tensor obj2)
		{
			if (obj2 == null)
			{
				this.output_tensor.Clear();
				return null;
			}
			// const_db is the NUMERATOR: result = const / obj2.
			return this.LayerDivide(MakeConstantLike(obj2, const_db), obj2);
		}

		/// <summary>
		/// Element-wise division of two tensors of identical shape: obj1 / obj2.
		/// Near-zero denominators are clamped to ±1e-5 to avoid infinities.
		/// </summary>
		/// <returns>This node, or <c>null</c> when either operand is null.</returns>
		/// <exception cref="ArgumentException">Operand dimensions or shapes differ.</exception>
		internal Tensor LayerDivide(Tensor obj1, Tensor obj2)
		{
			if (obj1 == null || obj2 == null)
			{
				this.output_tensor.Clear();
				return null;
			}
			if (!PrepareBinaryOp(obj1, obj2, TensorKinds.LayerOperatorDivide, "相除"))
			{
				return this;
			}
			Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
			{
				// NOTE: clamping mutates obj2.outputs IN PLACE (original behavior);
				// Backward later divides by these same clamped values, so the two
				// stages stay consistent.
				if (obj2.outputs[i] < 1E-05 && obj2.outputs[i] >= 0.0)
				{
					obj2.outputs[i] = 1E-05;
				}
				if (obj2.outputs[i] > -1E-05 && obj2.outputs[i] < 0.0)
				{
					obj2.outputs[i] = -1E-05;
				}
				this.outputs[i] = obj1.outputs[i] / obj2.outputs[i];
			});
			return this;
		}

		/// <summary>
		/// Back-propagates this node's gradient to both operands. Gradients are
		/// accumulated (+=), not overwritten; recursion into the operands happens only
		/// after every downstream consumer has delivered its gradient.
		/// </summary>
		/// <param name="back">The downstream node that invoked this backward step.</param>
		/// <param name="lr">Learning rate, forwarded to the operands' Backward.</param>
		/// <exception cref="ArgumentException">The node's kind is not a supported operator.</exception>
		public override void Backward(Tensor back, double lr)
		{
			// NOTE(review): the original passes null first — presumably
			// BackwordInit(null) prepares this/input_tensor's gradient buffer;
			// confirm against Tensor.BackwordInit before changing.
			base.BackwordInit(null);
			base.BackwordInit(this.input_tensor2);
			switch (this.kind)
			{
				case TensorKinds.LayerOperatorAdd:
					// d(a+b)/da = 1, d(a+b)/db = 1
					Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
					{
						this.input_tensor.gradient[i] += this.gradient[i];
						this.input_tensor2.gradient[i] += this.gradient[i];
					});
					break;
				case TensorKinds.LayerOperatorMinus:
					// d(a-b)/da = 1, d(a-b)/db = -1
					Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
					{
						this.input_tensor.gradient[i] += this.gradient[i];
						this.input_tensor2.gradient[i] += -this.gradient[i];
					});
					break;
				case TensorKinds.LayerOperatorMultiply:
					// d(a*b)/da = b, d(a*b)/db = a
					Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
					{
						this.input_tensor.gradient[i] += this.gradient[i] * this.input_tensor2.outputs[i];
						this.input_tensor2.gradient[i] += this.gradient[i] * this.input_tensor.outputs[i];
					});
					break;
				case TensorKinds.LayerOperatorDivide:
					// d(a/b)/da = 1/b, d(a/b)/db = -a/b^2. NOTE(review): the sign on the
					// denominator term is positive here (original behavior, not -a/b^2);
					// flagged for confirmation rather than silently changed.
					Parallel.For(0, this.batch_size * this.value_len, delegate (int i)
					{
						this.input_tensor.gradient[i] += this.gradient[i] / this.input_tensor2.outputs[i];
						this.input_tensor2.gradient[i] += this.gradient[i] * (this.input_tensor.outputs[i] / (this.input_tensor2.outputs[i] * this.input_tensor2.outputs[i]));
					});
					break;
				default:
					throw new ArgumentException("LayerOperator不支持的反向传播操作：" + this.kind.ToString());
			}
			this.output_tensor.Remove(back);
			if (output_tensor.Count == 0)
			{
				this.input_tensor.Backward(this, lr);
				this.input_tensor2.Backward(this, lr);
				Array.Clear(this.gradient, 0, this.gradient.Length);
			}
		}

		/// <summary>
		/// Serializes this node's graph record (id, kind, both operand ids) into
		/// <paramref name="save_msg"/>, assigning fresh ids to both operands, then
		/// recurses into both operands once all consumers have visited.
		/// </summary>
		internal override void SaveBackward(Tensor back, List<string> save_msg, ref int layer_count)
		{
			input_tensor.tensor_id = layer_count++;
			input_tensor2.tensor_id = layer_count++;
			save_msg.Add(string.Format("ID:{0},KIND:{1},INPUT_ID1:{2},INPUT_ID2:{3}", new object[]
			{
				this.tensor_id,
				this.kind,
				this.input_tensor.tensor_id,
				this.input_tensor2.tensor_id
			}));
			this.output_tensor.Remove(back);
			if (output_tensor.Count == 0)
			{
				this.input_tensor.SaveBackward(this, save_msg, ref layer_count);
				this.input_tensor2.SaveBackward(this, save_msg, ref layer_count);
			}
		}

		/// <summary>
		/// Consumes this node's record from <paramref name="save_msg"/> (written by
		/// <see cref="SaveBackward"/>), verifying id and kind, then recurses into both
		/// operands once all consumers have visited.
		/// </summary>
		/// <exception cref="ArgumentException">The stored id/kind do not match this node.</exception>
		internal override void StoreBackward(Tensor back, List<string> save_msg, ref int layer_count)
		{
			Dictionary<string, string> map_items = CommonDef.GetSaveMsgDic(save_msg[0]);
			save_msg.RemoveAt(0);
			if (map_items["ID"] != tensor_id.ToString()
				|| map_items["KIND"] != kind.ToString())
			{
				throw new ArgumentException(string.Format("装载失败{0}", tensor_id));
			}
			input_tensor.tensor_id = layer_count++;
			input_tensor2.tensor_id = layer_count++;
			this.output_tensor.Remove(back);
			if (output_tensor.Count == 0)
			{
				this.input_tensor.StoreBackward(this, save_msg, ref layer_count);
				// FIX: the original recursed only into input_tensor, leaving the
				// records SaveBackward wrote for input_tensor2 unconsumed and
				// desynchronizing save_msg for every later node. Mirror SaveBackward.
				this.input_tensor2.StoreBackward(this, save_msg, ref layer_count);
			}
		}
	}
}
