/**
 * Created: May 24, 2017
 */
package cn.edu.bjtu.test.model;

import java.io.File;
import java.util.HashMap;
import java.util.Map;

import org.datavec.api.split.FileSplit;
import org.deeplearning4j.eval.Evaluation;
import org.deeplearning4j.iterator.LabeledSentenceProvider;

import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.word2vec.Word2Vec;
import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.LearningRatePolicy;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.Updater;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.conf.layers.SubsamplingLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;

import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;

import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.api.DataSet;

import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;

import org.nd4j.linalg.lossfunctions.LossFunctions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import cn.edu.bjtu.configuration.TextCategorizationCNNConfig;
import cn.edu.bjtu.core.LoggerSupport;
import cn.edu.bjtu.datasource.lsp.TrainCNNSentenceProvider;
import cn.edu.bjtu.test.model.datasource.CnnSentenceDataSetIterator;
import cn.edu.bjtu.tokenization.AnsjTokenzierFactory;



//TODO BEGIN
public class LenetApp extends LoggerSupport {

	private static final Logger logger = LoggerFactory.getLogger(LenetApp.class);

	/** Configuration holding the word2vec model path and data set locations. */
	private static final TextCategorizationCNNConfig config = TextCategorizationCNNConfig.get();

	/**
	 * Pre-trained word2vec model used to embed sentences.
	 * NOTE(review): loaded eagerly at class initialization, which is slow and
	 * memory-heavy; consider lazy loading if this class is referenced without running main.
	 */
	private static final Word2Vec w2v = WordVectorSerializer.readWord2VecModel(config.getW2VModelPath());

	/** Ansj tokenizer factory; constructed with {@code false} (flag semantics defined by AnsjTokenzierFactory). */
	private static final TokenizerFactory tf = new AnsjTokenzierFactory(false);

	/** Mini-batch size used for both the training and test iterators. */
	private static final int BATCH_SIZE = 32;

	/** Maximum sentence length in tokens; also the "height" of the CNN input. */
	private static final int MAX_SENTENCE_LENGTH = 256;

	// NOTE(review): assumes the word2vec embedding dimension is 100 (used as the
	// CNN input "width" below) — confirm against the loaded w2v model.
	private static final int EMBEDDING_DIM = 100;

	/**
	 * Builds an iterator that turns labeled sentences into 2D word-embedding
	 * "images" (maxSentenceLength x embedding-dim) for CNN consumption.
	 *
	 * @param tokenizerFactory  tokenizer used to split sentences into words
	 * @param sentenceProvider  source of labeled sentences
	 * @param batchSize         mini-batch size
	 * @param maxSentenceLength maximum number of tokens kept per sentence
	 * @return a CNN-ready {@link CnnSentenceDataSetIterator}
	 */
	private static CnnSentenceDataSetIterator getCNNDataSet(TokenizerFactory tokenizerFactory,
			LabeledSentenceProvider sentenceProvider, int batchSize, int maxSentenceLength) {
		return new CnnSentenceDataSetIterator.Builder()
				.tokenizerFactory(tokenizerFactory)
				.sentenceProvider(sentenceProvider)
				.wordVectors(w2v)
				.minibatchSize(batchSize)
				.maxSentenceLength(maxSentenceLength)
				.useNormalizedWordVectors(false)
				.build();
	}

	/**
	 * Trains a LeNet-style CNN on labeled sentences and evaluates it on the test
	 * set after every epoch, logging the evaluation statistics.
	 *
	 * @param args unused
	 * @throws Exception if data loading or training fails
	 */
	public static void main(String[] args) throws Exception {
		final int nChannels = 1;   // number of input channels (one embedding "plane" per sentence)
		final int outputNum = 13;  // number of target categories
		final int nEpochs = 100;   // number of training epochs
		final int iterations = 1;  // training iterations per fit() call (DL4J 0.9.x API)
		final int seed = 123;      // RNG seed for reproducibility

		logger.info("Load data....");
		TrainCNNSentenceProvider train = new TrainCNNSentenceProvider();
		train.initialize(new FileSplit(new File(config.getDataSetDirOrFile())));

		TrainCNNSentenceProvider test = new TrainCNNSentenceProvider();
		test.initialize(new FileSplit(new File(config.getTestDataSetDirOrFile())));

		DataSetIterator trainIter = getCNNDataSet(tf, train, BATCH_SIZE, MAX_SENTENCE_LENGTH);
		DataSetIterator testIter = getCNNDataSet(tf, test, BATCH_SIZE, MAX_SENTENCE_LENGTH);

		logger.info("Build model....");

		// Learning-rate schedule in the form <iteration #, learning rate>.
		// NOTE: the schedule overrides the rate set via .learningRate() below.
		Map<Integer, Double> lrSchedule = new HashMap<>();
		lrSchedule.put(0, 0.01);
		lrSchedule.put(1000, 0.005);
		lrSchedule.put(3000, 0.001);

		MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
				.seed(seed)
				.iterations(iterations)
				.regularization(true).l2(0.0005)
				.learningRate(.01)
				.learningRateDecayPolicy(LearningRatePolicy.Schedule)
				.learningRateSchedule(lrSchedule)
				.weightInit(WeightInit.XAVIER)
				.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
				.updater(Updater.NESTEROVS).momentum(0.9)
				.list()
				.layer(0, new ConvolutionLayer.Builder(5, 5)
						// nIn = number of input channels; nOut = number of filters
						.nIn(nChannels)
						.stride(1, 1)
						.nOut(20)
						.activation(Activation.IDENTITY)
						.build())
				.layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
						.kernelSize(2, 2)
						.stride(2, 2)
						.build())
				.layer(2, new ConvolutionLayer.Builder(5, 5)
						// nIn need not be specified for later layers; setInputType infers it
						.stride(1, 1)
						.nOut(50)
						.activation(Activation.IDENTITY)
						.build())
				.layer(3, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
						.kernelSize(2, 2)
						.stride(2, 2)
						.build())
				.layer(4, new DenseLayer.Builder().activation(Activation.RELU)
						.nOut(100).build())
				.layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
						.nOut(outputNum) // was hard-coded 13; keep in sync with the class count
						.activation(Activation.SOFTMAX)
						.build())
				// Input is a flattened MAX_SENTENCE_LENGTH x EMBEDDING_DIM "image" with
				// nChannels planes. setInputType adds the CNN<->dense preprocessors,
				// validates the configuration, and infers nIn for later layers.
				.setInputType(InputType.convolutionalFlat(MAX_SENTENCE_LENGTH, EMBEDDING_DIM, nChannels))
				.backprop(true).pretrain(false).build();

		MultiLayerNetwork model = new MultiLayerNetwork(conf);
		model.init();

		logger.info("Train model....");
		model.setListeners(new ScoreIterationListener(1));
		for (int i = 0; i < nEpochs; i++) {
			model.fit(trainIter);
			logger.info("*** Completed epoch {} ***", i);

			// Evaluate on the held-out test set after every epoch.
			logger.info("Evaluate model....");
			Evaluation eval = new Evaluation(outputNum);
			while (testIter.hasNext()) {
				DataSet ds = testIter.next();
				INDArray output = model.output(ds.getFeatures(), false);
				eval.eval(ds.getLabels(), output);
			}
			logger.info(eval.stats());
			testIter.reset();
		}
		logger.info("****************Example finished********************");
	}
}
