/**
 * CoreNlpTokenizer.java
 *
 * @author: ZhuJiahui
 * @date: 2019/1/2 21:45
 */
package com.zhujiahui.corenlp.tokenizer;

import edu.stanford.nlp.ie.crf.CRFClassifier;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.process.PTBTokenizer;
import edu.stanford.nlp.tagger.maxent.MaxentTagger;
import org.apache.commons.lang3.StringUtils;

import java.io.Reader;
import java.io.StringReader;
import java.util.List;
import java.util.Properties;

/**
 * Tokenization helpers built on Stanford CoreNLP: Chinese word segmentation
 * via a {@link CRFClassifier} and English tokenization / POS tagging via
 * {@link MaxentTagger} and {@link PTBTokenizer}.
 *
 * <p>All methods return tokens joined by single spaces.
 *
 * @author ZhuJiahui
 * @version 1.0
 * @deprecated kept for reference/benchmarking only.
 */
@Deprecated
public class CoreNlpTokenizer {

    /**
     * Builds a CRF-based Chinese word segmenter.
     *
     * @param directory     sighan corpora dictionary directory
     *                      ({@code sighanCorporaDict} property)
     * @param dicName       serialized dictionary path ({@code serDictionary} property)
     * @param exceptionFile serialized classifier model to load
     * @return a classifier loaded with the given model and properties
     */
    public static CRFClassifier<CoreLabel> setClassifier(String directory, String dicName,
                                                         String exceptionFile) {
        Properties props = new Properties();
        props.setProperty("sighanCorporaDict", directory);
        props.setProperty("serDictionary", dicName);
        props.setProperty("inputEncoding", "UTF-8");
        props.setProperty("sighanPostProcessing", "true");

        CRFClassifier<CoreLabel> classifier = new CRFClassifier<>(props);
        classifier.loadClassifierNoExceptions(exceptionFile, props);
        classifier.flags.setProperties(props);

        return classifier;
    }

    /**
     * Segments a Chinese sentence and POS-tags the segmented tokens.
     *
     * @param sentence     raw Chinese text
     * @param classifier   Chinese word segmenter (see {@link #setClassifier})
     * @param maxentTagger POS tagger
     * @return space-separated {@code word_TAG} pairs, trimmed
     */
    public static String CNTokenize(String sentence, CRFClassifier<CoreLabel> classifier,
                                    MaxentTagger maxentTagger) {
        // segmentString returns a List<String>. The previous code cast its
        // toArray() result (an Object[]) to String[], which always threw
        // ClassCastException at runtime; use the list directly instead.
        List<String> segments = classifier.segmentString(sentence);
        // Join with spaces so the tagger sees the token boundaries.
        String tagged = maxentTagger.tagString(StringUtils.join(segments, " "));

        return tagged.trim();
    }

    /**
     * Segments a Chinese sentence into space-separated tokens (no POS tags).
     *
     * @param sentence   raw Chinese text
     * @param classifier Chinese word segmenter (see {@link #setClassifier})
     * @return space-separated tokens, trimmed
     */
    public static String CNTokenize2(String sentence,
                                     CRFClassifier<CoreLabel> classifier) {
        // See CNTokenize: the old Object[]-to-String[] cast always threw,
        // and a separator-less join would have undone the segmentation.
        List<String> segments = classifier.segmentString(sentence);
        return StringUtils.join(segments, " ").trim();
    }

    /**
     * POS-tags an English sentence.
     *
     * @param sentence     raw English text
     * @param maxentTagger POS tagger
     * @return tagger output ({@code word_TAG} pairs), trimmed
     */
    public static String ENTokenize(String sentence,
                                    MaxentTagger maxentTagger) {
        return maxentTagger.tagString(sentence).trim();
    }

    /**
     * Tokenizes an English sentence via the tagger, then strips the POS tags.
     *
     * @param sentence     raw English text
     * @param maxentTagger POS tagger
     * @return space-separated tokens without their {@code _TAG} suffixes
     */
    public static String ENTokenize2(String sentence,
                                     MaxentTagger maxentTagger) {
        String tagged = maxentTagger.tagString(sentence).trim();

        String[] taggedTokens = tagged.split(" ");
        String[] withoutTag = new String[taggedTokens.length];
        for (int i = 0; i < taggedTokens.length; i++) {
            // Tagger emits "word_TAG"; keep only the word part.
            withoutTag[i] = taggedTokens[i].split("_")[0];
        }

        // Join with spaces; a separator-less join would mash the words together.
        return StringUtils.join(withoutTag, " ");
    }

    /**
     * Tokenizes an English sentence with {@link PTBTokenizer} (no tagging).
     *
     * @param sentence raw English text
     * @return space-separated tokens
     */
    public static String ENTokenize3(String sentence) {
        List<CoreLabel> rawWords = PTBTokenizer.coreLabelFactory()
                .getTokenizer(new StringReader(sentence)).tokenize();

        String[] tokens = new String[rawWords.size()];
        for (int i = 0; i < rawWords.size(); i++) {
            tokens[i] = rawWords.get(i).value();
        }

        return StringUtils.join(tokens, " ");
    }

    /**
     * Benchmark driver: segments a long Chinese text and reports elapsed time.
     * Requires the Stanford segmenter model files on the classpath.
     */
    public static void main(String[] args) {
        CRFClassifier<CoreLabel> classifier = setClassifier("edu/stanford/nlp/models",
                "edu/stanford/nlp/models/segmenter/chinese/dict-chris6.ser.gz",
                "edu/stanford/nlp/models/segmenter/chinese/ctb.gz");

        String sentence =
                "朱温进京，他先以迅雷不及掩耳之势冲进皇宫，根本没兴趣搭理皇帝，先把太监们斩尽杀绝。一共几百人，全部死在刀下。其中不仅有两个新任命的禁军司令官，连绝大多数无权无势、也属于被迫害的小太监们也不放过。史称当时哀号呼冤之声，宫外数里皆闻，把皇上和大臣们彻底震住，以后少废无数口舌。接着朱温下令，再把派到各战区当监军的太监们也都就地处决（这个命令各战区的同志们通力合作，愉快合作！）。就这样，为时１４９年（公元７５５——９０３年）的宦官统治天下的时代终于结束了。";

        // Repeat the sentence 1000x to get a measurable workload.
        String[] repeated = new String[1000];
        for (int i = 0; i < repeated.length; i++) {
            repeated[i] = sentence;
        }
        String allSentence = StringUtils.join(repeated);

        long startTime = System.currentTimeMillis();
        String result = CNTokenize2(allSentence, classifier);
        long endTime = System.currentTimeMillis();

        System.out.println(result);
        System.out.println("程序运行时间： " + (endTime - startTime) + "ms");
    }
}
