﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace ZGSharp.Layers
{
    public class Softmax : Tensor
    {
        // Per-sample sum of the raw inputs. Retained for the (currently disabled)
        // input re-centering trick referenced in Backward.
        double[] in_batch_sum;

        /// <summary>
        /// Builds a softmax node on top of <paramref name="obj"/>. In build-only
        /// mode the node is merely wired into the graph; otherwise the
        /// numerically-stabilized softmax exp(x - rowMax) / rowSum is evaluated
        /// for every sample in the batch.
        /// </summary>
        /// <param name="obj">Input tensor; null propagates as null.</param>
        /// <returns>The new softmax tensor, or null when the input is null.</returns>
        public static Tensor softmax(Tensor obj)
        {
            if (obj == null)
            {
                return null;
            }

            Softmax layer = new Softmax();
            layer.kind = TensorKinds.LayerSoftmax;

            // Graph-construction pass: just link the node, no math.
            if (obj.kind == TensorKinds.TensorNormal || obj.build_only)
            {
                layer.build_only = true;
                layer.input_tensor = obj;
                obj.output_tensor.Add(layer);
                return layer;
            }

            layer.build_only = false;
            layer.Construct(obj.shape);
            layer.PredInit(obj);

            // layer was constructed from obj.shape, so both share this length.
            int len = layer.value_len;

            // Row-wise maximum, subtracted before exp() for numerical stability.
            double[] rowMax = new double[layer.batch_size];
            Parallel.For(0, layer.batch_size, i =>
            {
                rowMax[i] = obj.outputs[i * len];
                for (int j = 1; j < len; j++)
                {
                    if (rowMax[i] < obj.outputs[i * len + j])
                    {
                        rowMax[i] = obj.outputs[i * len + j];
                    }
                }
            });

            // Exponentiate the shifted inputs.
            Parallel.For(0, layer.batch_size * len, i =>
            {
                layer.outputs[i] = Math.Exp(obj.outputs[i] - rowMax[i / len]);
                if (double.IsNaN(layer.outputs[i]))
                {
                    throw new ArgumentException("is NaN value in softmax");
                }
            });

            // Accumulate, per row: the raw-input sum (kept for Backward's
            // disabled re-centering) and the exponential sum (the denominator).
            layer.in_batch_sum = new double[layer.batch_size];
            double[] expSum = new double[layer.batch_size];
            Parallel.For(0, layer.batch_size, i =>
            {
                for (int j = 0; j < len; j++)
                {
                    layer.in_batch_sum[i] += obj.outputs[i * len + j];
                    expSum[i] += layer.outputs[i * len + j];
                }
            });

            // Normalize each row into a probability distribution.
            Parallel.For(0, layer.batch_size * len, i =>
            {
                layer.outputs[i] = layer.outputs[i] / expSum[i / len];
            });

            return layer;
        }

        /// <summary>
        /// Back-propagates through the softmax using the full Jacobian,
        /// d out_j / d in_k = out_j * (delta_jk - out_k). Gradients are
        /// accumulated into the input tensor; recursion into the input only
        /// happens once every downstream consumer has delivered its gradient.
        /// </summary>
        /// <param name="back">The downstream tensor that triggered this call.</param>
        /// <param name="lr">Learning rate, forwarded to the input's Backward.</param>
        public override void Backward(Tensor back, double lr)
        {
            base.BackwordInit(null);

            // One iteration per (sample, input-element) pair; each i targets a
            // distinct gradient slot, so the parallel writes do not collide.
            Parallel.For(0, this.batch_size * this.value_len, i =>
            {
                int row = i / value_len;
                int col = i % value_len;
                for (int j = 0; j < this.value_len; j++)
                {
                    int outIdx = row * value_len + j;
                    if (col == j)
                    {
                        // Diagonal Jacobian term: o * (1 - o).
                        input_tensor.gradient[i] += gradient[outIdx] * outputs[outIdx] * (1 - outputs[outIdx]);
                    }
                    else
                    {
                        // Off-diagonal Jacobian term: -o_i * o_j.
                        input_tensor.gradient[i] -= gradient[outIdx] * outputs[i] * outputs[outIdx];
                    }
                }
                //input_tensor.gradient[i] += in_batch_sum[row] / input_tensor.value_len / input_tensor.batch_size;
                // NOTE: disabled re-centering trick — after many training steps the
                // inputs were observed to drift into a large range and produce NaN;
                // this pulled them back toward zero.
                if (double.IsNaN(input_tensor.gradient[i]))
                {
                    throw new ArgumentException("is NaN value in softmax");
                }
            });

            // Wait for all consumers before recursing and clearing our gradient.
            this.output_tensor.Remove(back);
            if (output_tensor.Count == 0)
            {
                this.input_tensor.Backward(this, lr);
                Array.Clear(this.gradient, 0, this.gradient.Length);
            }
        }
    }
}
