package com.nlp.mallet;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.ObjectOutputStream;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.regex.Pattern;

import cc.mallet.fst.CRF;
import cc.mallet.fst.CRFCacheStaleIndicator;
import cc.mallet.fst.CRFOptimizableByBatchLabelLikelihood;
import cc.mallet.fst.CRFOptimizableByLabelLikelihood;
import cc.mallet.fst.CRFTrainerByValueGradients;
import cc.mallet.fst.CRFWriter;
import cc.mallet.fst.MultiSegmentationEvaluator;
import cc.mallet.fst.SumLattice;
import cc.mallet.fst.SumLatticeDefault;
import cc.mallet.fst.ThreadedOptimizable;
import cc.mallet.fst.TransducerEvaluator;
import cc.mallet.fst.TransducerTrainer;
import cc.mallet.fst.ViterbiWriter;
import cc.mallet.optimize.Optimizable;
import cc.mallet.pipe.CharSequence2TokenSequence;
import cc.mallet.pipe.CharSequenceLowercase;
import cc.mallet.pipe.Input2CharSequence;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.SerialPipes;
import cc.mallet.pipe.TokenSequence2FeatureSequence;
import cc.mallet.pipe.TokenSequenceRemoveStopwords;
import cc.mallet.pipe.iterator.CsvIterator;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.InstanceList;

/**
 * Sequence tagging (named-entity recognition) with MALLET linear-chain CRFs.
 *
 * <p>Provides single-threaded ({@link #run}) and multi-threaded ({@link #run2})
 * training entry points, a shared training/evaluation routine, and a data-import
 * pipeline. File paths are currently hard-coded (see {@code importData} and the
 * model-serialization step).
 *
 * @author ygsong.abcft
 */
public class SequenceTag {

	/**
	 * Trains a CRF using the single-threaded label-likelihood optimizer.
	 *
	 * @param trainingData instances used to fit the model
	 * @param testingData  instances used for evaluation during training
	 */
	public void run(InstanceList trainingData, InstanceList testingData) {
		CRF crf = new CRF(trainingData.getDataAlphabet(),
				trainingData.getTargetAlphabet());
		crf.addFullyConnectedStatesForLabels();
		// Restrict weights to feature/label pairs observed in the training data.
		crf.setWeightsDimensionAsIn(trainingData, false);

		CRFOptimizableByLabelLikelihood optLabel =
				new CRFOptimizableByLabelLikelihood(crf, trainingData);
		evaluator(trainingData, testingData, crf, optLabel);
	}

	/**
	 * Trains a CRF using a multi-threaded (batched) label-likelihood optimizer.
	 *
	 * @param trainingData instances used to fit the model
	 * @param testingData  instances used for evaluation during training
	 */
	public void run2(InstanceList trainingData, InstanceList testingData) {
		// NOTE(review): hard-coded thread count; consider
		// Runtime.getRuntime().availableProcessors() instead.
		int numThreads = 32;
		CRF crf = new CRF(trainingData.getDataAlphabet(),
				trainingData.getTargetAlphabet());
		// The graph must be built before the optimizable queries
		// crf.getParameters().getNumFactors(); without these calls the CRF has
		// no states/weights (this mirrors run() and MALLET's threaded example).
		crf.addFullyConnectedStatesForLabels();
		crf.setWeightsDimensionAsIn(trainingData, false);

		CRFOptimizableByBatchLabelLikelihood batchOptLabel =
				new CRFOptimizableByBatchLabelLikelihood(crf, trainingData, numThreads);
		ThreadedOptimizable optLabel = new ThreadedOptimizable(
				batchOptLabel, trainingData, crf.getParameters().getNumFactors(),
				new CRFCacheStaleIndicator(crf));
		try {
			evaluator(trainingData, testingData, crf, optLabel);
		} finally {
			// Always release the worker threads, even if training fails.
			optLabel.shutdown();
		}
	}

	/**
	 * Trains the given CRF to convergence, evaluates it, runs a small
	 * hand-built inference demo, and serializes the model to disk.
	 *
	 * @param trainingData training instances
	 * @param testingData  held-out instances for evaluation
	 * @param crf          the CRF whose parameters will be optimized in place
	 * @param optLabel     gradient/value objective wrapping {@code crf}
	 */
	private void evaluator(InstanceList trainingData, InstanceList testingData, CRF crf,
			Optimizable.ByGradientValue optLabel) {
		Optimizable.ByGradientValue[] opts = new Optimizable.ByGradientValue[] { optLabel };

		CRFTrainerByValueGradients crfTrainer = new CRFTrainerByValueGradients(crf, opts);

		// CoNLL-style segment labels used for precision/recall evaluation.
		String[] labels = new String[] { "I-PER", "I-LOC", "I-ORG", "I-MISC" };

		TransducerEvaluator evaluator = new MultiSegmentationEvaluator(new InstanceList[] { trainingData, testingData },
				new String[] { "train", "test" }, labels, labels) {
			@Override
			public boolean precondition(TransducerTrainer tt) {
				// Evaluate only every 5th iteration to keep training fast.
				return tt.getIteration() % 5 == 0;
			}
		};

		crfTrainer.addEvaluator(evaluator);
		CRFWriter crfWriter = new CRFWriter("ner_crf.model") {
			@Override
			public boolean precondition(TransducerTrainer tt) {
				// Fires only when the iteration count is 0 mod Integer.MAX_VALUE,
				// i.e. effectively only at iteration 0.
				return tt.getIteration() % Integer.MAX_VALUE == 0;
			}
		};

		ViterbiWriter viterbiWriter = new ViterbiWriter(
				"ner_crf",
				new InstanceList[] { trainingData, testingData },
				new String[] { "train", "test" });
		crfTrainer.addEvaluator(viterbiWriter);
		crfTrainer.addEvaluator(crfWriter);

		crfTrainer.setMaxResets(0);
		// Integer.MAX_VALUE iterations => train until the optimizer converges.
		crfTrainer.train(trainingData, Integer.MAX_VALUE);

		evaluator.evaluate(crfTrainer);

		// --- Inference demo on a hand-built 4-token sequence (feature ids 1,2,3
		// per position; output label ids 0..3). Assumes the alphabets contain at
		// least those entries — TODO confirm against the actual training data.
		FeatureVectorSequence inputSeq = new FeatureVectorSequence(
				new FeatureVector[] {
						new FeatureVector(crf.getInputAlphabet(), new int[] {
								1, 2, 3 }),
						new FeatureVector(crf.getInputAlphabet(), new int[] {
								1, 2, 3 }),
						new FeatureVector(crf.getInputAlphabet(), new int[] {
								1, 2, 3 }),
						new FeatureVector(crf.getInputAlphabet(), new int[] {
								1, 2, 3 }), });

		FeatureSequence outSeq = new FeatureSequence(crf.getOutputAlphabet(),
				new int[] { 0, 1, 2, 3 });

		// Probability of the output sequence: exp(score(x, y) - log Z(x)).
		double logScore = new SumLatticeDefault(crf, inputSeq, outSeq)
				.getTotalWeight();
		double logZ = new SumLatticeDefault(crf, inputSeq).getTotalWeight();
		double prob = Math.exp(logScore - logZ);
		System.out.println(prob);

		// Marginal probabilities via forward-backward.
		SumLattice lattice = new SumLatticeDefault(crf, inputSeq);
		int ip = 1;
		int si = 1;
		// P(state si at position ip AND state si at position ip+1 | input).
		double twoStateMarginal = lattice.getXiProbability(ip, crf.getState(si), crf.getState(si));
		System.out.println(twoStateMarginal);
		// P(state si at position ip+1 | input).
		double oneStateMarginal = lattice.getGammaProbability(ip + 1, crf.getState(si));
		System.out.println(oneStateMarginal);

		// Serialize the trained model. try-with-resources closes the streams in
		// the correct order (oos before fos) and on exceptions — the original
		// closed fos before oos, which could lose buffered data.
		try (FileOutputStream fos = new FileOutputStream("D:\\Mallet\\test\\ner_crf.model");
				ObjectOutputStream oos = new ObjectOutputStream(fos)) {
			oos.writeObject(crf);
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Loads training data through the preprocessing pipeline: lowercase,
	 * tokenize, remove stopwords, then map tokens to feature indices.
	 *
	 * <p>Input file and stopword list paths are hard-coded Windows paths.
	 *
	 * @return the imported instances
	 * @throws IOException if the input file cannot be opened or read
	 */
	public InstanceList importData() throws IOException {
		ArrayList<Pipe> pipeList = new ArrayList<Pipe>();
		// Lowercase -> tokenize -> drop stopwords -> feature sequence.
		pipeList.add(new Input2CharSequence("UTF-8"));
		pipeList.add(new CharSequenceLowercase());
		pipeList.add(new CharSequence2TokenSequence(Pattern.compile("\\p{L}[\\p{L}\\p{P}]+\\p{L}")));
		pipeList.add(new TokenSequenceRemoveStopwords(new File("D:\\crf\\data\\dictionary\\stopwords.txt"), "UTF-8", false, false, false));
		pipeList.add(new TokenSequence2FeatureSequence());
		SerialPipes serialPipes = new SerialPipes(pipeList);

		String inputFile = "D:\\Mallet\\test\\b.txt";
		String encoding = "UTF-8";
		// Regex groups: 3 = data, 2 = target, 1 = name (see CsvIterator args).
		String lineRegex = "^(\\S*)[\\s,]*(\\S*)[\\s,]*(.*)$";
		InstanceList instances = new InstanceList(serialPipes);

		// try-with-resources: the original leaked the reader.
		try (Reader fileReader = new InputStreamReader(new FileInputStream(inputFile), encoding)) {
			instances.addThruPipe(new CsvIterator(fileReader, Pattern.compile(lineRegex),
					3, 2, 1));
		}

		return instances;
	}

	public static void main(String[] args) throws IOException {
		SequenceTag tag = new SequenceTag();
		InstanceList importData = tag.importData();
		// NOTE(review): trains and evaluates on the same data — the "test"
		// numbers are therefore not a held-out estimate.
		tag.run(importData, importData);
	}

}
