package neuralnetworks.anns;

import java.io.IOException;
import java.io.Serializable;

import Jama.Matrix;
import model.pretreatment.DataPretreatment;
import neuralnetworks.option.anns.AnnsOption;
import neuralnetworks.utils.mnist.MnistDataRead;

/**
 * 人工神经网络
 * @author LvWenJin
 *
 */
/**
 * Artificial neural network trained with batch gradient descent and
 * back-propagation, built on Jama matrices. Layer layout, initial weights,
 * hyper-parameters and the training input all come from {@link AnnsOption}.
 *
 * <p>NOTE(review): instances are NOT thread-safe — forward/backward state
 * (z, a, d, gradient) lives in mutable fields shared across calls.
 */
public class Anns implements Serializable{
	
	private static final long serialVersionUID = 1L;

	// Configuration: layer counts, weights, learning step, lambda, sample count.
	private AnnsOption option;
	
	public AnnsOption getOption() {
		return option;
	}

	public void setOption(AnnsOption option) {
		this.option = option;
	}

	
	// Cost of the previous training iteration ("cast" is the original name,
	// kept for serialization compatibility; it means "cost"). Initialised
	// effectively to +infinity so the first convergence check always passes.
	private double cast = Integer.MAX_VALUE;
	
	// Per-weight-layer regularization term, added onto the gradient each step.
	private Matrix[] reg;
	
	/**
	 * Allocates all per-layer buffers from the configuration.
	 *
	 * <p>Layer indexing: index 0 is the input layer; there are
	 * {@code getHiddenlayersNum()} hidden layers plus one output layer, hence
	 * {@code hidden + 2} entries for z/a and {@code hidden + 1} weight layers.
	 *
	 * @param option network configuration and training data
	 */
	public Anns(AnnsOption option) {
		this.option = option;
		
		// Weight matrices, one per layer transition.
		this.thetas = this.option.getThetas();
		
		// Pre-activation values (z) for input + hidden + output layers.
		this.z = new Matrix[option.getHiddenlayersNum()+2];
		
		// Input layer with the bias column prepended.
		this.z[0] = addBias(option.getA1());
		
		// Activations (sigmoid of z) for every layer.
		this.a = new Matrix[option.getHiddenlayersNum()+2];
		
		// Input layer with the bias column prepended.
		this.a[0] = addBias(option.getA1());
		
		// Error terms (deltas), one per non-input layer.
		this.d = new Matrix[option.getHiddenlayersNum()+1];
		
		// Gradient of the cost w.r.t. each weight matrix.
		this.gradient = new Matrix[option.getHiddenlayersNum()+1];
		
		this.reg = new Matrix[option.getHiddenlayersNum()+1];
	
	}
	
	// Gradients, one per weight matrix.
	private Matrix[] gradient;
	
	// Per-layer error terms (deltas).
	private Matrix[] d;
	
	// Pre-activation values (theta * a) per layer.
	private Matrix[] z;
	
	// Activations per layer: sigmoid(z), bias column added except on output.
	private Matrix[] a;
	
	// Weight matrices; thetas[i] maps layer i to layer i+1.
	private Matrix[] thetas;
	
	
	public Matrix[] getThetas() {
		return thetas;
	}

	public void setThetas(Matrix[] thetas) {
		this.thetas = thetas;
	}

	public Matrix[] getGradient() {
		return gradient;
	}

	public void setGradient(Matrix[] gradient) {
		this.gradient = gradient;
	}

	public Matrix[] getD() {
		return d;
	}

	public void setD(Matrix[] d) {
		this.d = d;
	}

	public Matrix[] getZ() {
		return z;
	}

	public void setZ(Matrix[] z) {
		this.z = z;
	}

	public Matrix[] getA() {
		return a;
	}

	public void setA(Matrix[] a) {
		this.a = a;
	}

	
	/***
	 * Evaluates the trained network on a labelled MNIST data set.
	 *
	 * @param dataFileName  path of the MNIST image file
	 * @param labelFileName path of the MNIST label file
	 * @return number of samples whose thresholded (0.5) output vector exactly
	 *         matches the one-hot encoded label
	 * @throws IOException if either file cannot be read
	 */
	public double testModel(String dataFileName,String labelFileName) throws IOException {
		//MnistDataRead.TEST_IMAGES_FILE
		double[][] images = MnistDataRead.getImages(dataFileName);
		//MnistDataRead.TEST_LABELS_FILE
		double[] label = MnistDataRead.getLabels(labelFileName);
		
		images = DataPretreatment.imageDataNorm(images);
		
		// One-hot encode the labels (10 digit classes).
		double[][] y = new double[images.length][10];
		
		// FIX: was "i < images.length - 1", which left the last sample's
		// one-hot row all zeros and mis-scored the final sample.
		for(int i = 0;i<images.length;i++) {
			y[i][(int)label[i]] = 1;
		}
		
		
		a[0] = addBias(new Matrix(images));
		z[0] = addBias(new Matrix(images));
		
		// Allow the raw pixel array to be reclaimed during the forward pass.
		images = null;
		
		// Forward pass: compute z (pre-activation) and a (activation) for
		// every layer. Hidden layers get a bias column; the output does not.
		for(int i = 0;i<thetas.length;i++) {
			z[i+1] = this.a[i].times(thetas[i].transpose());
			if(i == thetas.length-1) {
				a[i+1] = sigmoidFun(z[i+1]);
				continue;
			}
			a[i+1] = addBias(sigmoidFun(z[i+1]));
		}
		
		// Threshold each output unit at 0.5 and count exact one-hot matches.
		double[][] result = (a[a.length-1]).getArray();
		int trueflag = 0;
		for(int i = 0;i<result.length;i++) {
			for(int j = 0;j<10;j++) {
				if(result[i][j] <0.5) {
					result[i][j] = 0;
				}else {
					result[i][j] = 1;
				}
			}
			
			if(check(result[i],y[i])) {
				trueflag+=1;
			}
		}
		return trueflag;
	}
	
	// Returns true when both vectors are element-wise identical.
	private boolean check(double[] a,double[] b) {
		
		for(int i = 0;i<a.length;i++) {
			if(a[i] != b[i]) {
				return false;
			}
		}
		
		return true;
	}
	
	
	
	/***
	 * Runs a forward pass on the data loaded from the given file and returns
	 * the raw output-layer activations (no thresholding applied).
	 *
	 * @param fileName input file understood by {@code AnnsOption#createA1}
	 * @return output-layer activation matrix, one row per sample
	 * @throws IOException if the file cannot be read
	 */
	public Matrix predict(String fileName) throws IOException {
		Matrix createA1 = option.createA1(fileName);
		
		a[0] = addBias(createA1);
		z[0] = addBias(createA1);
		// Forward pass over all layers; hidden layers get a bias column,
		// the output layer does not.
		for(int i = 0;i<thetas.length;i++) {
			z[i+1] = this.a[i].times(thetas[i].transpose());
			if(i == thetas.length-1) {
				a[i+1] = sigmoidFun(z[i+1]);
				continue;
			}
			a[i+1] = addBias(sigmoidFun(z[i+1]));
		}
		return a[a.length-1];
	}
	
	
	/***
	 * Trains the whole network with back-propagation / batch gradient descent,
	 * iterating until the cost improvement per step falls inside [0, 1e-5).
	 *
	 * <p>NOTE(review): the loop also continues while {@code cast - tempCast < 0}
	 * (i.e. while the cost is INCREASING). This looks intentional — "train
	 * through divergence until a plateau" — but should be confirmed, as it can
	 * loop for a long time with a too-large learning step.
	 *
	 * @param y one-hot encoded labels for the training input held in the option
	 */
	public void train(Matrix y) {
		double tempCast = Integer.MAX_VALUE-1;
		
		// 1/m scaling for the batch gradient; loop-invariant, computed once.
		double stepMove = (double)1/option.getNum();
		while(cast - tempCast >= 1E-5 || cast - tempCast<0) {
			// Forward pass: compute z and a for every layer beyond the input.
			// Hidden layers get a bias column; the output layer does not.
			for(int i = 0;i<thetas.length;i++) {
				z[i+1] = this.a[i].times(thetas[i].transpose());
				if(i == thetas.length-1) {
					a[i+1] = sigmoidFun(z[i+1]);
					continue;
				}
				a[i+1] = sigmoidFun(z[i+1]);
				a[i+1] = addBias(a[i+1]);
			}
			
			
			
			
			// Backward pass: propagate the error from the output layer to the
			// first hidden layer (deltas w.r.t. z; the input layer has none).
			for(int i = d.length-1 ; i>=0 ; i--) {
				if(i==d.length-1) {
					// Output layer: delta is simply (a - y) for sigmoid +
					// cross-entropy cost.
					d[i] = a[i+1].minus(y);
					
					continue;
				}
				// Hidden layer: push the next layer's delta back through the
				// weights, drop the bias column, then scale by sigmoid'(z).
				d[i] = d[i+1].times(thetas[i+1]);
				d[i] = moveBias(d[i]);
				d[i] = d[i].arrayTimes(diffSigmoid(z[i+1]));
				
				
			}
			
			
			// Accumulate gradients: (delta^T * a) / m plus L2 regularization.
			for(int i=0;i<gradient.length;i++) {
				
				gradient[i] = d[i].transpose().times(a[i]);
				gradient[i] = gradient[i].times(stepMove);
				reg[i] = thetas[i].times((double)option.getLambda()/option.getNum());
				gradient[i] = gradient[i].plus(reg[i]);
			}
			
			
			// Bias weights (column 0) must not be regularized; undo it.
			noRegularBias();
			
			// Shift: previous cost becomes the baseline, then recompute.
			cast = tempCast;
			tempCast=getCast(this.a[a.length-1],y);
			
			// Gradient-descent weight update.
			updateThetas();
			
			// GC hint only — the JVM may ignore it; kept from the original.
			System.gc();
		}
		
		
	}
	
	/***
	 * Computes the regularized cross-entropy cost over the whole batch; used
	 * to detect when the error has plateaued and training can stop.
	 *
	 * <p>NOTE(review): {@code Math.log} yields -Infinity when an activation
	 * saturates at exactly 0 or 1 — no epsilon clamping is applied here.
	 *
	 * @param an output-layer activations, one row per sample
	 * @param y  one-hot encoded labels, same shape as {@code an}
	 * @return cost = cross-entropy / m + (lambda / 2m) * sum(theta^2)
	 */
	public double getCast(Matrix an,Matrix y) {
		double[][] am = an.getArray();
		double[][] b1 = new double[am.length][am[0].length];
		double[][] b2 = new double[am.length][am[0].length];
		double[][] y2 = new double[am.length][am[0].length];
		
		// b1 = log(a), b2 = log(1 - a), y2 = 1 - y, element-wise.
		for(int i = 0;i<am.length;i++) {
			for(int j = 0;j<am[0].length;j++) {
				b1[i][j] = Math.log(am[i][j]);
				b2[i][j] = Math.log(1-am[i][j]);
				y2[i][j] = 1-y.get(i, j);
			}
		}
		
		Matrix b3 = new Matrix(b1);
		
		Matrix yy2 = new Matrix(y2);
		
		// c1 = -y .* log(a) - (1-y) .* log(1-a)  (cross-entropy per element)
		Matrix c1 = y.uminus();
		c1 = c1.arrayTimes(b3);
		
		Matrix c2 = yy2.arrayTimes(new Matrix(b2));
		
		c1.minusEquals(c2);
		
		// Sum every element, column by column.
		double[] all = new double[c1.getColumnDimension()];
		
		double allCast = 0;
		for(int i = 0;i<c1.getColumnDimension();i++) {
			for(int j = 0;j<c1.getRowDimension();j++) {
				all[i] += c1.get(j, i);
			}
			allCast += all[i];
		}
		
		allCast = allCast/option.getNum();
		double regCast = getRegCast();
		
		regCast *= (double)option.getLambda()/(2*option.getNum());
		System.out.println(allCast);
		
		
		return allCast+regCast;
		//return ((double)allCast/option.getNum())+getRegCast();
	}
	
	/***
	 * Sums the squares of every weight — the raw L2 regularization term.
	 * The lambda/(2m) scaling is applied by the caller ({@link #getCast}).
	 *
	 * <p>NOTE(review): this also squares the bias-column weights, so the cost
	 * readout regularizes biases even though the gradient (via
	 * {@code noRegularBias}) does not — confirm whether that is intended.
	 *
	 * @return sum over all layers of sum(theta .^ 2)
	 */
	public double getRegCast() {
		
		double castAll = 0;
		
		for(int i =0; i<thetas.length;i++) {
			Matrix matrix = new Matrix(thetas[i].getArray());
			matrix = thetas[i].arrayTimes(thetas[i]);
			double[] cast = new double[matrix.getColumnDimension()]; 
			double allcast = 0;
			for(int m =0;m<matrix.getColumnDimension();m++) {
				for(int n = 0;n<matrix.getRowDimension();n++) {
					cast[m] += matrix.get(n, m);
				}
				allcast += cast[m];
			}
			castAll += allcast;
			matrix = null;
		}
		
		//double result = ((double)option.getLambda()/(option.getNum()*2))*castAll;
		
		return castAll;
	}
	
	
	/**
	 * Gradient-descent step: theta -= step * gradient, for every layer.
	 */
	private void updateThetas() {
		for(int i = 0;i<this.thetas.length;i++) {
			this.gradient[i] = this.gradient[i].times(option.getStep());
			thetas[i] = thetas[i].minus(this.gradient[i]);
		}
	}
	
	
	/**
	 * Prepends a bias column to a matrix.
	 *
	 * @param argument source matrix
	 * @return copy of {@code argument} with an extra leftmost column of ones
	 */
	private Matrix addBias(Matrix argument) {
		double[][] result = new double[argument.getRowDimension()][argument.getColumnDimension()+1];
		for(int i = 0;i<result.length;i++) {
			result[i][0] = 1;
			for(int j = 0;j<result[0].length-1;j++) {
				result[i][j+1] = argument.get(i, j);
			}
		}
		return new Matrix(result);
	}
	
	/**
	 * Removes the leftmost (bias) column of a matrix.
	 *
	 * @param argument source matrix (must have at least one column)
	 * @return copy of {@code argument} without its first column
	 */
	private Matrix moveBias(Matrix argument) {
		double[][] result = new double[argument.getRowDimension()][argument.getColumnDimension()-1];
		for(int i = 0;i<result.length;i++) {
			for(int j = 0;j<result[0].length;j++) {
				result[i][j] = argument.get(i, j+1);
			}
		}
		return new Matrix(result);
	}
	
	
	/***
	 * Element-wise sigmoid activation: 1 / (1 + e^-x).
	 *
	 * @param argument pre-activation matrix (z)
	 * @return matrix of the same shape with sigmoid applied to every element
	 */
	private Matrix sigmoidFun(Matrix argument) {
		double[][] array = argument.getArray();
		Matrix result = new Matrix(argument.getRowDimension(), argument.getColumnDimension());
		for(int i = 0;i<argument.getRowDimension();i++) {
			for(int j = 0;j<argument.getColumnDimension();j++) {
				result.set(i, j, 1/(1+Math.exp(0-array[i][j])));
			}
		}
		return result;
	}
	
	/**  
	 * Element-wise derivative of the sigmoid: s(x) * (1 - s(x)).
	 *
	 * @param argument pre-activation matrix (z)
	 * @return matrix of the same shape holding sigmoid'(z)
	 */
	private Matrix diffSigmoid(Matrix argument) {
		double[][] array = argument.getArray();
		Matrix result = new Matrix(argument.getRowDimension(), argument.getColumnDimension());
		double ele = 0;
		double sigmodEle = 0;
		
		for(int i = 0;i<argument.getRowDimension();i++) {
			for(int j = 0;j<argument.getColumnDimension();j++) {
				ele = array[i][j];
				sigmodEle = (double)1/(1+Math.exp(0-ele));
				result.set(i, j, sigmodEle*(1-sigmodEle));
			}
		}
		return result;
	}
	
	/**
	 * Removes the regularization contribution from the bias column (column 0)
	 * of every gradient — bias weights must not be penalized.
	 */
	private void noRegularBias() {
		double temp = 0;
		for(int m = 0;m<this.gradient.length;m++) {
			
			for(int i = 0;i<gradient[m].getRowDimension();i++) {
				temp = gradient[m].get(i, 0);
				temp -= ((double)option.getLambda()/option.getNum())*thetas[m].get(i, 0);
				gradient[m].set(i, 0, temp);
			}
		}
	}
	
	
	
}
