/**
 * LeAndMergeCnnNet.java created by zhangzhidong
 * at 12:49:50 PM, May 26, 2017
 */
package cn.edu.bjtu.workbench.driver;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.ConvolutionMode;
import org.deeplearning4j.nn.conf.LearningRatePolicy;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.Updater;
import org.deeplearning4j.nn.conf.graph.MergeVertex;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.conf.layers.SubsamplingLayer;
import org.deeplearning4j.nn.weights.WeightInit;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.lossfunctions.LossFunctions;

import cn.edu.bjtu.workbench.api.CNNDataSetIteratorProviderHandler;
import cn.edu.bjtu.workbench.api.CNNNetworkDesignHandler;
import cn.edu.bjtu.workbench.configuration.TextCategorizationCNNConfig;
import cn.edu.bjtu.workbench.core.Deep4jModelType;
import cn.edu.bjtu.workbench.model.DefaultCNNDataSetIteratorProviderHandler;
import cn.edu.bjtu.workbench.model.TextCategorizationCNNModel;

/**
 * Driver that trains a text-categorization CNN. The primary design
 * ({@code handleCGC}) is a ComputationGraph with three parallel convolution
 * branches (kernel heights 3, 4 and 5 over the full word-vector width) merged
 * depth-wise, globally pooled and classified by a softmax output; a LeNet-style
 * MultiLayerNetwork alternative is provided by {@code handleMLN}. Train/test
 * data-set iterators are delegated to {@code DefaultCNNDataSetIteratorProviderHandler}.
 *
 * @author zhangzhidong (May 26, 2017)
 */
public class LeAndMergeCnnNet extends DriverSupport implements CNNNetworkDesignHandler,CNNDataSetIteratorProviderHandler {

	/** Number of output classes produced by both network designs below. */
	private static final int NUM_CLASSES = 13;

	/** CNN model wrapper; obtained in {@link #runInternal(CommandLine)}. */
	TextCategorizationCNNModel model = null;
	/** Delegate that actually builds the train/test DataSetIterators. */
	DefaultCNNDataSetIteratorProviderHandler handler = new DefaultCNNDataSetIteratorProviderHandler();
	// NOTE(review): the five fields below are never read or written in this
	// class — the equivalent values are carried by the inherited `config`.
	// Kept to avoid breaking any package-local callers; consider removing.
	String WORD2VEC_MODEL_PATH = null;
	String TEXT_DIR_OR_FILE = null;
	String TEST_DIR_OR_FILE;
	String CNN_NETWORK_SAVE_FILE;
	int CNN_EPOCH = 10;

	/**
	 * Declares the command-line options this driver understands.
	 *
	 * @return the populated {@link Options}
	 */
	protected Options getOptions(){
		Options options = new Options();
		options.addOption("w2v", "word2vecfile", true, "word2vec model file");
		options.addOption("train", "trainfile", true, "train set to build the classifier");
		options.addOption("test", "testfile", true, "test set to help minimize the network error");
		options.addOption("target", "targetfile", true, "file to save the cnn network");
		options.addOption("epoch", true, "the epoch");
		return options;
	}

	/**
	 * Copies parsed command-line values into the shared configuration, then
	 * configures, builds and saves the CNN model.
	 *
	 * @param cli parsed command line produced from {@link #getOptions()}
	 * @throws InterruptedException if the wait for the asynchronous build is interrupted
	 */
	public void runInternal(CommandLine cli) throws InterruptedException{
		config.setCNN_TYPE(Deep4jModelType.ComputationGraph);
		if(cli.hasOption("w2v")){
			config.setWORD2VEC_MODEL_PATH(cli.getOptionValue("w2v"));
		}
		if(cli.hasOption("train")){
			config.setTEXT_DIR_OR_FILE(cli.getOptionValue("train"));
		}
		if(cli.hasOption("test")){
			config.setTEST_DIR_OR_FILE(cli.getOptionValue("test"));
		}
		if(cli.hasOption("target")){
			config.setCNN_NETWORK_SAVE_FILE(cli.getOptionValue("target"));
		}
		if(cli.hasOption("epoch")){
			config.setCNN_EPOCH(Integer.parseInt(cli.getOptionValue("epoch")));
		}

		model = TextCategorizationCNNModel.get();
		logger.info("begin config");
		model.configNetworkDesignHandler(this);
		model.configDataSetIteratorHandlerProvider(this);
		// buildNetworkModel() runs asynchronously: the main thread sleeps so the
		// build can acquire its lock and finish before saveModel() is called.
		// NOTE(review): a fixed 100-second sleep is brittle — replace with a
		// join/latch on the build if the model API exposes one.
		logger.info("begin build");
		model.buildNetworkModel();
		TimeUnit.SECONDS.sleep(100);
		model.saveModel();
	}

	/**
	 * Builds the ComputationGraph design: three parallel convolution branches
	 * with kernel heights 3, 4 and 5 spanning the full word-vector width,
	 * merged depth-wise, globally pooled and classified by a softmax output.
	 *
	 * @param embeddingWordVectorLength width of each word-embedding vector
	 * @return the graph configuration
	 */
	@Override
	public ComputationGraphConfiguration handleCGC(int embeddingWordVectorLength) {
		int vectorSize = embeddingWordVectorLength;
		int cnnLayerFeatureMaps = config.getCNNLayerFeatureMaps();
		// Renamed from `config`: the original local shadowed the inherited
		// `config` field, forcing a confusing `this.config` workaround below.
		ComputationGraphConfiguration graphConfig = new NeuralNetConfiguration.Builder()
	            .weightInit(WeightInit.RELU)
	            .activation(Activation.LEAKYRELU)
	            .updater(Updater.ADAM)
	            .convolutionMode(ConvolutionMode.Same)      //This is important so we can 'stack' the results later
	            .regularization(true).l2(0.0001)
	            .learningRate(0.01)
	            .graphBuilder()
	            .addInputs("input")
	            .addLayer("cnn3", new ConvolutionLayer.Builder()
	                .kernelSize(3,vectorSize)
	                .stride(1,vectorSize)
	                .nIn(1)
	                .nOut(cnnLayerFeatureMaps)
	                .build(), "input")
	            .addLayer("cnn4", new ConvolutionLayer.Builder()
	                .kernelSize(4,vectorSize)
	                .stride(1,vectorSize)
	                .nIn(1)
	                .nOut(cnnLayerFeatureMaps)
	                .build(), "input")
	            .addLayer("cnn5", new ConvolutionLayer.Builder()
	                .kernelSize(5,vectorSize)
	                .stride(1,vectorSize)
	                .nIn(1)
	                .nOut(cnnLayerFeatureMaps)
	                .build(), "input")
	            .addVertex("merge", new MergeVertex(), "cnn3", "cnn4", "cnn5")      //Perform depth concatenation
	            .addLayer("globalPool", new GlobalPoolingLayer.Builder()
	                .poolingType(config.getPoolingType())
	                .build(), "merge")
	            .addLayer("out", new OutputLayer.Builder()
	                .lossFunction(LossFunctions.LossFunction.MCXENT)
	                .activation(Activation.SOFTMAX)
	                .nIn(3*cnnLayerFeatureMaps)   // one feature-map set per merged branch
	                .nOut(NUM_CLASSES)            // was mis-commented as "2 classes: positive or negative"
	                .build(), "globalPool")
	            .setOutputs("out")
	            .build();
		return graphConfig;
	}

	/**
	 * Builds a LeNet-style MultiLayerNetwork design: two conv + max-pool stages
	 * followed by a dense layer and a {@value #NUM_CLASSES}-way softmax output,
	 * trained with SGD / Nesterov momentum under a stepped learning-rate schedule.
	 *
	 * @param embeddingWordVectorLength width of each word-embedding vector
	 *        (unused here — the input shape is fixed at 256x100x1; TODO confirm intended)
	 * @return the multi-layer configuration
	 */
	@Override
	public MultiLayerConfiguration handleMLN(int embeddingWordVectorLength) {
		// Iteration-indexed schedule: 0.01, then 0.005 from iter 1000, 0.001 from 3000.
		Map<Integer, Double> lrSchedule = new HashMap<>();
	        lrSchedule.put(0, 0.01);
	        lrSchedule.put(1000, 0.005);
	        lrSchedule.put(3000, 0.001);

	        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
	                .seed(1234)
	                .iterations(1) // Training iterations per fit call
	                .regularization(true).l2(0.0005)
	                .learningRate(.01)
	                .learningRateDecayPolicy(LearningRatePolicy.Schedule)
	                .learningRateSchedule(lrSchedule)
	                .weightInit(WeightInit.XAVIER)
	                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
	                .updater(Updater.NESTEROVS).momentum(0.9)
	                .list()
	                .layer(0, new ConvolutionLayer.Builder(5, 5)
	                        // nIn is the channel count; nOut the number of filters
	                        .nIn(1)
	                        .stride(1, 1)
	                        .nOut(20)
	                        .activation(Activation.IDENTITY)
	                        .build())
	                .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
	                        .kernelSize(2,2)
	                        .stride(2,2)
	                        .build())
	                .layer(2, new ConvolutionLayer.Builder(5, 5)
	                        // nIn is inferred from the previous layer's output depth
	                        .stride(1, 1)
	                        .nOut(50)
	                        .activation(Activation.IDENTITY)
	                        .build())
	                .layer(3, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
	                        .kernelSize(2,2)
	                        .stride(2,2)
	                        .build())
	                .layer(4, new DenseLayer.Builder().activation(Activation.RELU)
	                        .nOut(100).build())
	                .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
	                        .nOut(NUM_CLASSES)
	                        .activation(Activation.SOFTMAX)
	                        .build())
	                .setInputType(InputType.convolutionalFlat(256,100,1)) // height 256, width 100, 1 channel
	                .backprop(true).pretrain(false).build();
		return conf;
	}

	/**
	 * Delegates creation of the training-set iterator to the default handler.
	 * {@inheritDoc}
	 */
	@Override
	public DataSetIterator handleTrain(TextCategorizationCNNConfig config, WordVectors wv, TokenizerFactory tf,
			int batch, int senLen) throws Exception {
		return handler.handleTrain(config, wv, tf, batch, senLen);
	}

	/**
	 * Delegates creation of the test-set iterator to the default handler.
	 * {@inheritDoc}
	 */
	@Override
	public DataSetIterator handleTest(TextCategorizationCNNConfig config, WordVectors wv, TokenizerFactory tf,
			int batch, int senLen) throws Exception {
		return handler.handleTest(config, wv, tf, batch, senLen);
	}

}
