package com.fengwk.deeplearning.core;

import java.io.Serializable;

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

import com.fengwk.support.exception.BaseException;

public class Layer implements Serializable {
	
	private static final long serialVersionUID = 6378370920271556153L;
	
	private UnitCompute.Enum uce;
	private UnitCompute uc;// activation-function computation for this layer's units
	private int j;// number of units in this layer
	
	private INDArray W;// weight matrix, j*n
	private INDArray b;// bias vector, j*1
	
	private INDArray A_pre;// previous layer's activations, n*m
	private INDArray Z;// linear combination W*A_pre + b, j*m
	
	/**
	 * Creates a layer with the given activation type and unit count.
	 * 
	 * @param uce activation-function type; must not be null
	 * @param j number of units in this layer
	 */
	public Layer(UnitCompute.Enum uce, int j) {
		if (uce == null)
			throw new BaseException("uce == null");
		this.uce = uce;
		this.j = j;
		this.uc = uce.instance();
	}
	
	/**
	 * Resets the layer so W and b are re-initialized on the next forward pass.
	 */
	void reset() {
		W = null;
		b = null;
	}
	
	/**
	 * Forward propagation.
	 * 
	 * @param A_pre previous layer's activations, n*m; mutated in place when dropout is applied
	 * @param keepProp dropout keep-probability in (0,1]; values &gt;= 1 disable dropout
	 * @return this layer's activations A = g(W*A_pre + b), j*m
	 */
	INDArray foreprop(INDArray A_pre, float keepProp) {
		this.A_pre = A_pre;// n*m
		if (W == null) {
			int n = A_pre.size(0);
			// Scaled initialization to mitigate exploding/vanishing gradients:
			// Tanh: sqrt(1/n); ReLU (He): sqrt(2/n).
			// NOTE(review): Nd4j.rand is uniform in [0,1) (mean 0.5, not
			// zero-centered); He/Xavier init conventionally uses Nd4j.randn —
			// confirm whether uniform was intended.
			if (uce == UnitCompute.Enum.RELU)
				W = Nd4j.rand(j, n).mul(Math.sqrt(2d / n));// j*n
			else 
				W = Nd4j.rand(j, n).mul(Math.sqrt(1d / n));// j*n
		}
		if (b == null)
			b = Nd4j.zeros(j, 1);// j*1
		// inverted dropout on the incoming activations
		if (keepProp < 1)
			dropout(A_pre, keepProp);
		// propagate; m is read dynamically so different batch sizes work
		int m = A_pre.size(1);
		Z = W.mmul(A_pre).add(b.broadcast(j, m));// j*m
		INDArray A = uc.activate(Z);// j*m
		return A;
	}
	
	/**
	 * Inverted dropout: zeroes each element of A_pre with probability
	 * (1 - keepProp), then rescales by 1/keepProp so the expected activation
	 * magnitude is unchanged. Mutates A_pre in place.
	 */
	private void dropout(INDArray A_pre, float keepProp) {
		INDArray dropout = Nd4j.rand(A_pre.shape());
		for (int i = 0; i < dropout.length(); i ++) {
			if (dropout.getFloat(i) > keepProp)
				A_pre.putScalar(i, 0);
		}
		// BUGFIX: div() returns a new array and the original code discarded
		// the result, so the rescaling never took effect; divi() scales
		// A_pre in place as inverted dropout requires.
		A_pre.divi(keepProp);
	}
	
	/**
	 * Backward propagation: computes gradients, applies a gradient-descent
	 * update to W and b, and returns the gradient for the previous layer.
	 * 
	 * @param dA gradient of the loss w.r.t. this layer's activations, j*m
	 * @param alpha learning rate
	 * @param lambda L2 regularization strength; 0 disables regularization
	 * @return dA_pre, gradient w.r.t. the previous layer's activations, n*m
	 */
	INDArray backprop(INDArray dA, float alpha, float lambda) {
		INDArray dABydZ = uc.dActivate(Z);// j*m
		INDArray dZ = dA.mul(dABydZ);// j*m
		int m = A_pre.size(1);
		INDArray dW = dZ.mmul(A_pre.transpose()).div(m);// j*n
		INDArray db = dZ.mean(1);// j*1
		// L2 regularization.
		// BUGFIX: add() returns a new array; the original code discarded the
		// result, silently making regularization a no-op. addi() accumulates
		// the penalty term in place.
		// NOTE(review): regularizing the bias term is unconventional but is
		// kept to preserve the original intent.
		if (lambda != 0) {
			dW.addi(W.mul(lambda / m));
			db.addi(b.mul(lambda / m));
		}
		
		// gradient to pass back to the previous layer
		INDArray dA_pre = W.transpose().mmul(dZ);// n*m
		
		// gradient-descent parameter update
		W = W.sub(dW.mul(alpha));// j*n
		b = b.sub(db.mul(alpha));// j*1
		
		return dA_pre;
	}

	/**
	 * Returns the weight matrix.
	 * 
	 * @return W, j*n (null until the first forward pass initializes it)
	 */
	INDArray W() {
		return W;
	}

	/**
	 * Returns the bias vector.
	 * 
	 * @return b, j*1 (null until the first forward pass initializes it)
	 */
	INDArray b() {
		return b;
	}

	/**
	 * Returns the number of units in this layer.
	 * 
	 * @return the unit count j
	 */
	int unitCount() {
		return j;
	}

}
