/**
 * DefaultCNNDesignHandler.java created by zhangzhidong
 * at 14:30:02 on 2017-05-26
 */
package cn.edu.bjtu.workbench.model;

import java.util.HashMap;
import java.util.Map;

import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.ConvolutionMode;
import org.deeplearning4j.nn.conf.LearningRatePolicy;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.Updater;
import org.deeplearning4j.nn.conf.graph.MergeVertex;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.conf.layers.PoolingType;
import org.deeplearning4j.nn.conf.layers.SubsamplingLayer;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

import cn.edu.bjtu.workbench.api.CNNNetworkDesignHandler;
import cn.edu.bjtu.workbench.configuration.TextCategorizationCNNConfig;

/**
 * @author zhangzhidong<br>
 * comment generated at 2017年5月26日下午2:30:02<br>
 * 
 */
public class DefaultCNNDesignHandler implements CNNNetworkDesignHandler{
	TextCategorizationCNNConfig config = null;
	public DefaultCNNDesignHandler(TextCategorizationCNNModel facade) {
		config = facade.getCNNConfig();
	}
	public DefaultCNNDesignHandler(TextCategorizationCNNConfig c) {
		this.config = c;
	}
	
	/**
	 * ComputaionGraph 可以自定义层与层之间是如何连接的。
	 * @Param 传入词向量个数
	 * @Author zhangzhidong
	 */
	
	@Override
	public MultiLayerConfiguration handleMLN(int embeddingWordVectorLength) {
		  Map<Integer, Double> lrSchedule = new HashMap<>();
	        lrSchedule.put(0, 0.01);
	        lrSchedule.put(1000, 0.005);
	        lrSchedule.put(3000, 0.001);
		  MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
	                .seed(123)
	                .iterations(1) // Training iterations as above
	                .regularization(true).l2(0.0005)
	                /*
	                    Uncomment the following for learning decay and bias
	                 */
	                .learningRate(.01)//.biasLearningRate(0.02)
	                /*
	                    Alternatively, you can use a learning rate schedule.

	                    NOTE: this LR schedule defined here overrides the rate set in .learningRate(). Also,
	                    if you're using the Transfer Learning API, this same override will carry over to
	                    your new model configuration.
	                */
	                .learningRateDecayPolicy(LearningRatePolicy.Schedule)
	                .learningRateSchedule(lrSchedule)
	                /*
	                    Below is an example of using inverse policy rate decay for learning rate
	                */
	                //.learningRateDecayPolicy(LearningRatePolicy.Inverse)
	                //.lrPolicyDecayRate(0.001)
	                //.lrPolicyPower(0.75)
	                .weightInit(WeightInit.XAVIER)
	                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
	                .updater(Updater.NESTEROVS).momentum(0.9)
	                .list()
	                .layer(0, new ConvolutionLayer.Builder(5, 5)
	                        //nIn and nOut specify depth. nIn here is the nChannels and nOut is the number of filters to be applied
	                        .nIn(1)
	                        .stride(1, 1)
	                        .nOut(20)
	                        .activation(Activation.IDENTITY)
	                        .build())
	                .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
	                        .kernelSize(2,2)
	                        .stride(2,2)
	                        .build())
	                .layer(2, new ConvolutionLayer.Builder(5, 5)
	                        //Note that nIn need not be specified in later layers
	                        .stride(1, 1)
	                        .nOut(50)
	                        .activation(Activation.IDENTITY)
	                        .build())
	                .layer(3, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
	                        .kernelSize(2,2)
	                        .stride(2,2)
	                        .build())
	                .layer(4, new DenseLayer.Builder().activation(Activation.RELU)
	                        .nOut(100).build())
	                .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
	                        .nOut(13)
	                        .activation(Activation.SOFTMAX)
	                        .build())
	                .setInputType(InputType.convolutionalFlat(256,100,1)) //See note below
	                .backprop(true).pretrain(false).build();
		return conf;
	}

	/**
	 * 一般的卷积神经网络。实现有很多种，可以自己搭建神经网络
	 * @Param
	 * @Author zhangzhidong
	 */
	
	@Override
	public ComputationGraphConfiguration  handleCGC(int embeddingWordVectorLength) {
		int vectorSize = embeddingWordVectorLength;
		int cnnLayerFeatureMaps = config.getCNNLayerFeatureMaps();
		PoolingType globalPoolingType = config.getPoolingType();
		ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
	            .weightInit(WeightInit.RELU)
	            .activation(Activation.LEAKYRELU)
	            .updater(Updater.ADAM)
	            .convolutionMode(ConvolutionMode.Same)      //This is important so we can 'stack' the results later
	            .regularization(true).l2(0.0001)
	            .learningRate(0.01)
	            .graphBuilder()
	            .addInputs("input")
	            .addLayer("cnn3", new ConvolutionLayer.Builder()
	                .kernelSize(3,vectorSize)
	                .stride(1,vectorSize)
	                .nIn(1)
	                .nOut(cnnLayerFeatureMaps)
	                .build(), "input")
	            .addLayer("cnn4", new ConvolutionLayer.Builder()
	                .kernelSize(4,vectorSize)
	                .stride(1,vectorSize)
	                .nIn(1)
	                .nOut(cnnLayerFeatureMaps)
	                .build(), "input")
	            .addLayer("cnn5", new ConvolutionLayer.Builder()
	                .kernelSize(5,vectorSize)
	                .stride(1,vectorSize)
	                .nIn(1)
	                .nOut(cnnLayerFeatureMaps)
	                .build(), "input")
	            .addVertex("merge", new MergeVertex(), "cnn3", "cnn4", "cnn5")      //Perform depth concatenation
	            .addLayer("globalPool", new GlobalPoolingLayer.Builder()
	                .poolingType(globalPoolingType)
	                .build(), "merge")
	            .addLayer("out", new OutputLayer.Builder()
	                .lossFunction(LossFunctions.LossFunction.MCXENT)
	                .activation(Activation.SOFTMAX)
	                .nIn(3*cnnLayerFeatureMaps)
	                .nOut(15)    //2 classes: positive or negative
	                .build(), "globalPool")
	            .setOutputs("out")
	            .build();
		return config;
	}

}
