/**
 * May 20, 2017
 */
package cn.edu.bjtu.workbench;

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

import org.apache.commons.io.FilenameUtils;
import org.datavec.api.split.FileSplit;
import org.deeplearning4j.berkeley.Pair;
import org.deeplearning4j.eval.Evaluation;
import org.deeplearning4j.iterator.CnnSentenceDataSetIterator;
import org.deeplearning4j.iterator.LabeledSentenceProvider;
import org.deeplearning4j.iterator.provider.FileLabeledSentenceProvider;
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.deeplearning4j.models.word2vec.Word2Vec;
import org.deeplearning4j.nn.api.Layer;
import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.ConvolutionMode;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.Updater;
import org.deeplearning4j.nn.conf.graph.MergeVertex;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.conf.layers.PoolingType;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.deeplearning4j.nn.weights.WeightInit;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.dataset.api.DataSet;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.lossfunctions.LossFunctions;

import cn.edu.bjtu.workbench.configuration.TextCategorizationCNNConfig;
import cn.edu.bjtu.workbench.datasource.dsiter.TransformedVecDataSetIterator;
import cn.edu.bjtu.workbench.datasource.fileiter.TransformedVecWithIdLineVectorRecordReader;
import cn.edu.bjtu.workbench.datasource.lsp.TrainCNNSentenceProvider;
import cn.edu.bjtu.workbench.tokenization.AnsjTokenzierFactory;

/**
 * Deprecated standalone driver for CNN-based text categorization; use the
 * unified entry point {@code cn.edu.bjtu.App} instead.
 *
 * <p>Builds a Kim-2014-style sentence-classification CNN on top of pre-trained
 * word2vec embeddings: three parallel convolutions (kernel heights 3/4/5 over
 * the full embedding width) merged by depth concatenation, a global pooling
 * layer, and a softmax output.
 *
 * @author Alex
 * @deprecated superseded by the unified entry point {@code cn.edu.bjtu.App}
 */
@Deprecated
public class AppCNN {

	/** Base directory of the IMDB-style sentiment data consumed by {@link #getDataSetIterator}. */
	public static final String DATA_PATH = "D:\\textdata\\dl4j_w2vSentiment";

	/**
	 * Historical entry point, intentionally left empty; the live entry point
	 * is {@code cn.edu.bjtu.App}.
	 */
	public static void main1(String[] args) throws IOException, InterruptedException {
		// intentionally empty — superseded by cn.edu.bjtu.App
	}

	/**
	 * Loads a word2vec model from disk.
	 *
	 * @param path model file path; when {@code null}, falls back to the path
	 *             configured by {@link TextCategorizationCNNConfig}
	 * @return the deserialized {@link Word2Vec} model
	 * @throws IOException if the model file cannot be read
	 */
	public static Word2Vec loadW2v(String path) throws IOException {
		String w2vPath = TextCategorizationCNNConfig.get().getW2VModelPath();
		System.out.println("load word2vec .....................");
		Word2Vec wv = WordVectorSerializer.readWord2VecModel(new File(path == null ? w2vPath : path));
		System.out.println("load word2vec done .....................");
		return wv;
	}

	/**
	 * Builds a CNN sentence {@link DataSetIterator} over labeled training files.
	 *
	 * @param batch  minibatch size
	 * @param senLen maximum sentence length (longer sentences are truncated)
	 * @param wv     word vectors used to embed tokens
	 * @param path   root directory of the labeled training data
	 * @return an iterator producing CNN-shaped sentence batches
	 * @throws IOException          if the training files cannot be read
	 * @throws InterruptedException if provider initialization is interrupted
	 */
	public static DataSetIterator getCNNIter(int batch, int senLen, Word2Vec wv, String path)
			throws IOException, InterruptedException {
		TrainCNNSentenceProvider sentenceProvider = new TrainCNNSentenceProvider();
		sentenceProvider.initialize(new FileSplit(new File(path)));

		// Ansj tokenizer for Chinese text; 'false' flag semantics defined by AnsjTokenzierFactory.
		TokenizerFactory tokenizerFactory = new AnsjTokenzierFactory(false);
		return new CnnSentenceDataSetIterator.Builder()
				.tokenizerFactory(tokenizerFactory)
				.sentenceProvider(sentenceProvider)
				.wordVectors(wv)
				.minibatchSize(batch)
				.maxSentenceLength(senLen)
				.useNormalizedWordVectors(false)
				.build();
	}

	/**
	 * Builds the CNN configuration with the historical default of 15 output
	 * classes. Kept for backward compatibility; see the four-argument overload.
	 *
	 * @param vectorSize          width of the word-embedding vectors
	 * @param cnnLayerFeatureMaps number of feature maps per convolution branch
	 * @param globalPoolingType   pooling applied after the merge vertex
	 * @return the assembled computation-graph configuration
	 */
	public static ComputationGraphConfiguration getCNNConfig(int vectorSize, int cnnLayerFeatureMaps,
			PoolingType globalPoolingType) {
		return getCNNConfig(vectorSize, cnnLayerFeatureMaps, globalPoolingType, 15);
	}

	/**
	 * Builds a sentence-classification CNN configuration: three parallel
	 * convolution branches (kernel heights 3/4/5 spanning the full embedding
	 * width), depth-concatenated, globally pooled, then a softmax output.
	 *
	 * @param vectorSize          width of the word-embedding vectors
	 * @param cnnLayerFeatureMaps number of feature maps per convolution branch
	 * @param globalPoolingType   pooling applied after the merge vertex
	 * @param numClasses          number of output classes
	 * @return the assembled computation-graph configuration
	 */
	public static ComputationGraphConfiguration getCNNConfig(int vectorSize, int cnnLayerFeatureMaps,
			PoolingType globalPoolingType, int numClasses) {
		ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder()
				.weightInit(WeightInit.RELU)
				.activation(Activation.LEAKYRELU)
				.updater(Updater.ADAM)
				.convolutionMode(ConvolutionMode.Same) // 'Same' padding so branch outputs can be stacked later
				.regularization(true).l2(0.0001)
				.learningRate(0.01)
				.graphBuilder()
				.addInputs("input")
				.addLayer("cnn3", new ConvolutionLayer.Builder()
						.kernelSize(3, vectorSize)
						.stride(1, vectorSize)
						.nIn(1)
						.nOut(cnnLayerFeatureMaps)
						.build(), "input")
				.addLayer("cnn4", new ConvolutionLayer.Builder()
						.kernelSize(4, vectorSize)
						.stride(1, vectorSize)
						.nIn(1)
						.nOut(cnnLayerFeatureMaps)
						.build(), "input")
				.addLayer("cnn5", new ConvolutionLayer.Builder()
						.kernelSize(5, vectorSize)
						.stride(1, vectorSize)
						.nIn(1)
						.nOut(cnnLayerFeatureMaps)
						.build(), "input")
				.addVertex("merge", new MergeVertex(), "cnn3", "cnn4", "cnn5") // depth concatenation
				.addLayer("globalPool", new GlobalPoolingLayer.Builder()
						.poolingType(globalPoolingType)
						.build(), "merge")
				.addLayer("out", new OutputLayer.Builder()
						.lossFunction(LossFunctions.LossFunction.MCXENT)
						.activation(Activation.SOFTMAX)
						.nIn(3 * cnnLayerFeatureMaps) // one branch's maps per kernel size, concatenated
						.nOut(numClasses)
						.build(), "globalPool")
				.setOutputs("out")
				.build();
		return config;
	}

	/**
	 * Builds a CNN sentence iterator over the IMDB-style pos/neg review layout
	 * under {@link #DATA_PATH}.
	 *
	 * <p>BUGFIX: the previous version drained the {@link LabeledSentenceProvider}
	 * with a debug print loop before passing it to the builder, leaving the
	 * returned iterator with an already-exhausted provider (and dumping the whole
	 * dataset to stdout). That loop has been removed.
	 *
	 * @param isTraining        {@code true} for the train split, {@code false} for test
	 * @param wordVectors       word vectors used to embed tokens
	 * @param minibatchSize     minibatch size
	 * @param maxSentenceLength maximum sentence length (longer sentences are truncated)
	 * @param rng               randomness source for sentence shuffling
	 * @return an iterator producing CNN-shaped sentence batches
	 * @throws IllegalStateException if the pos/neg review directories are missing
	 */
	static DataSetIterator getDataSetIterator(boolean isTraining, WordVectors wordVectors, int minibatchSize,
			int maxSentenceLength, Random rng) {
		String path = FilenameUtils.concat(DATA_PATH, (isTraining ? "aclImdb/train/" : "aclImdb/test/"));
		String positiveBaseDir = FilenameUtils.concat(path, "pos");
		String negativeBaseDir = FilenameUtils.concat(path, "neg");
		// listFiles() returns null for a missing/non-directory path — fail with a clear message
		File[] positiveFiles = new File(positiveBaseDir).listFiles();
		File[] negativeFiles = new File(negativeBaseDir).listFiles();
		if (positiveFiles == null || negativeFiles == null) {
			throw new IllegalStateException("Review directories not found under: " + path);
		}
		Map<String, List<File>> reviewFilesMap = new HashMap<>();
		reviewFilesMap.put("Positive", Arrays.asList(positiveFiles));
		reviewFilesMap.put("Negative", Arrays.asList(negativeFiles));
		LabeledSentenceProvider sentenceProvider = new FileLabeledSentenceProvider(reviewFilesMap, rng);

		return new CnnSentenceDataSetIterator.Builder()
				.sentenceProvider(sentenceProvider)
				.wordVectors(wordVectors)
				.minibatchSize(minibatchSize)
				.maxSentenceLength(maxSentenceLength)
				.useNormalizedWordVectors(false)
				.build();
	}
}
