﻿using System;
using System.Runtime.CompilerServices;
using System.Threading.Tasks;

namespace ZGSharp.Layers
{
	public class Dropout : Tensor
	{
		// Per-element keep/drop record from the forward pass:
		// 1 = activation was kept, 0 = activation was zeroed.
		// Consumed again in Backward to gate gradient flow.
		// NOTE(review): only allocated on the computed (non-build_only) path;
		// Backward assumes a computed forward pass ran first.
		public byte[] drop_mask = null;

		/// <summary>
		/// Applies dropout to <paramref name="obj"/>: each activation is kept with
		/// probability (1 - <paramref name="rate"/>) and zeroed otherwise.
		/// In graph-construction mode (normal tensor input or build_only input)
		/// only the node wiring is created, without computing outputs.
		/// NOTE: surviving activations are NOT rescaled by 1/(1 - rate)
		/// ("inverted dropout"); callers must compensate at inference if needed.
		/// </summary>
		/// <param name="obj">Input tensor; may be null, in which case null is returned.</param>
		/// <param name="rate">Probability of dropping each element (default 0.2).</param>
		/// <returns>The dropout node, or null when <paramref name="obj"/> is null.</returns>
		public static Tensor dropout(Tensor obj, double rate = 0.2)
		{
			if (obj == null)
			{
				return null;
			}

			Dropout tensor = new Dropout();
			tensor.kind = TensorKinds.LayerDropout;

			// Build-only path: just wire this node into the graph; no computation yet.
			if (obj.kind == TensorKinds.TensorNormal || obj.build_only)
			{
				tensor.build_only = true;
				tensor.input_tensor = obj;
				obj.output_tensor.Add(tensor);
				return tensor;
			}

			tensor.build_only = false;
			tensor.Construct(obj.shape);
			tensor.PredInit(obj);

			int total = tensor.value_len * tensor.batch_size;
			tensor.drop_mask = new byte[total];

			// BUG FIX: System.Random is not thread-safe. The original code shared one
			// instance across all Parallel.For workers; concurrent NextDouble() calls
			// corrupt its internal state (it can degenerate to returning 0.0 forever),
			// silently breaking the mask. Use the thread-local-state overload so each
			// worker gets its own Random, seeded from a lock-protected seed source.
			Random seedSource = new Random();
			object seedLock = new object();
			Parallel.For(0, total,
				delegate ()
				{
					lock (seedLock)
					{
						return new Random(seedSource.Next());
					}
				},
				delegate (int i, ParallelLoopState state, Random rng)
				{
					if (rng.NextDouble() > rate)
					{
						tensor.outputs[i] = obj.outputs[i];
						tensor.drop_mask[i] = 1;
					}
					else
					{
						tensor.outputs[i] = 0.0;
						tensor.drop_mask[i] = 0;
					}
					return rng;
				},
				delegate (Random rng) { });

			return tensor;
		}

		/// <summary>
		/// Propagates this node's gradient to the input tensor. Gradient flows only
		/// through positions the forward pass kept (drop_mask[i] == 1); dropped
		/// positions contribute nothing.
		/// </summary>
		/// <param name="back">The downstream consumer reporting its gradient.</param>
		/// <param name="lr">Learning rate, forwarded to the input's Backward.</param>
		public override void Backward(Tensor back, double lr)
		{
			BackwordInit(null);
			// A node may feed several consumers, so gradients arriving from
			// different branches are accumulated (+=) rather than assigned.
			Parallel.For(0, this.value_len * batch_size, delegate (int i)
			{
				if (this.drop_mask[i] == 1)
				{
					this.input_tensor.gradient[i] += this.gradient[i];
				}
				// dropped positions: the original "+= 0.0" was a no-op, removed
			});
			this.output_tensor.Remove(back);
			// Recurse into the input only once every consumer has reported,
			// then clear our own gradient for the next backward pass.
			if (this.output_tensor.Count == 0)
			{
				this.input_tensor.Backward(this, lr);
				Array.Clear(this.gradient, 0, this.gradient.Length);
			}
		}
	}
}
