import java.io.StringReader;
import java.util.List;
import java.util.Properties;

import edu.stanford.nlp.ie.crf.CRFClassifier;
import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.ling.TaggedWord;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.process.DocumentPreprocessor;
import edu.stanford.nlp.trees.PennTreebankLanguagePack;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreebankLanguagePack;
import edu.stanford.nlp.util.CoreMap;


/**
 * Static helpers around the Stanford NLP toolkit for Chinese text:
 * word segmentation (CRF classifier), constituency parsing
 * (lexicalized PCFG grammar), and plain-text rendering of sentences
 * and parse trees.
 *
 * <p>{@link #initialize()} must be called once before any other
 * method is used; it loads the parser, language pack, and segmenter
 * into the public static fields below. Model paths are relative to
 * the working directory.
 */
public class NLPUtilities {
    /** Directory containing the serialized Chinese PCFG grammar. */
    final static private String GRAMMAR_PATH = "lib/stanford-parser/grammar";
    /** Directory containing the Chinese segmenter's models and dictionaries. */
    final static private String SEGMENTER_PATH = "lib/stanford-chinese-segmenter/data";

    /** Sentences longer than this are skipped by the parser (see parseSentences). */
    final static private int maxTokenCountPerSentence = 50;
    /** Tokens the preprocessor treats as sentence-final punctuation. */
    final static private String[] sentenceEndings = new String[] {
        ".",
        "\u00B7", // middle dot
        "!",
        "?",
        "\u3002", // ideographic full stop
        "\uFF01", // full-width exclamation mark
        "\uFF1F", // full-width question mark
        "\uFF0C"  // full-width comma
    };

    // Shared, lazily-loaded NLP components; populated by initialize().
    static public LexicalizedParser parser;
    static public TreebankLanguagePack tlp;
    static public CRFClassifier<CoreMap> classifier;

    /**
     * Loads the parser, language pack, and Chinese segmenter on the
     * first call; later calls are no-ops. Synchronized so concurrent
     * callers cannot race and load the (expensive) models twice.
     */
    static public synchronized void initialize() {
        if (parser == null) {
            parser = createLexicalParser();
            tlp = new PennTreebankLanguagePack();
            classifier = createChineseClassifier();
            System.err.println("Done initializing libraries.");
        }
    }

    /** Loads the serialized Chinese PCFG grammar from GRAMMAR_PATH. */
    static private LexicalizedParser createLexicalParser() {
        return new LexicalizedParser(GRAMMAR_PATH + "/chinesePCFG.ser.gz");
    }

    /**
     * Builds a CRF-based Chinese word segmenter configured for the
     * CTB (Chinese Treebank) model under SEGMENTER_PATH.
     */
    static private CRFClassifier<CoreMap> createChineseClassifier() {
        Properties props = new Properties();
        props.setProperty("sighanCorporaDict", SEGMENTER_PATH + "/releasedata");
        // props.setProperty("NormalizationTable", "data/norm.simp.utf8");
        // props.setProperty("normTableEncoding", "UTF-8");
        // below is needed because CTBSegDocumentIteratorFactory accesses it
        props.setProperty("serDictionary", SEGMENTER_PATH + "/dict-chris6.ser.gz");
        props.setProperty("inputEncoding", "UTF-8");
        props.setProperty("sighanPostProcessing", "true");

        CRFClassifier<CoreMap> classifier = new CRFClassifier<CoreMap>(props);
        classifier.loadClassifierNoExceptions(SEGMENTER_PATH + "/ctb.gz", props);
        // flags must be re-set after data is loaded
        classifier.flags.setProperties(props);

        return classifier;
    }

    /**
     * Splits text into paragraphs on runs of newline characters.
     *
     * @param text the raw input text
     * @return the non-empty paragraph chunks, in order
     */
    static public String[] breakIntoParagraphs(String text) {
        return text.split("\\n+");
    }

    /**
     * Segments Chinese text into words using the CRF classifier,
     * returning the tokens joined by single spaces.
     *
     * @param text unsegmented Chinese text
     * @return space-separated token string (empty if no tokens)
     */
    static public String segment(String text) {
        List<String> tokens = classifier.segmentString(text);
        // StringBuilder: local, single-threaded — no need for StringBuffer's locking.
        StringBuilder sb = new StringBuilder();
        for (String token : tokens) {
            if (sb.length() > 0) {
                sb.append(' ');
            }
            sb.append(token);
        }
        return sb.toString();
    }

    /** Callback invoked once per sentence found by {@link #parseSentences}. */
    static public interface ParserCallback {
        /**
         * @param sentenceIndex zero-based index of the sentence in the text
         * @param sentence      the tokenized sentence
         * @param tree          the parse tree, or {@code null} if the sentence
         *                      exceeded maxTokenCountPerSentence and was not parsed
         */
        public void onSentence(int sentenceIndex, List<HasWord> sentence, Tree tree)
            throws Exception;
    }

    /**
     * Tokenizes text into sentences and parses each one, invoking the
     * callback per sentence. Sentences longer than
     * maxTokenCountPerSentence are reported with a {@code null} tree
     * instead of being parsed (parsing cost grows steeply with length).
     *
     * @param text     the input text
     * @param callback receives each sentence and its (possibly null) parse
     * @return the number of sentences processed
     * @throws Exception propagated from the callback
     */
    static public int parseSentences(String text, ParserCallback callback) throws Exception {
        DocumentPreprocessor dp = new DocumentPreprocessor(new StringReader(text));
        dp.setTokenizerFactory(tlp.getTokenizerFactory());
        dp.setSentenceFinalPuncWords(sentenceEndings);

        int sentenceCount = 0;
        for (List<HasWord> sentence : dp) {
            if (sentence.size() <= maxTokenCountPerSentence) {
                Tree tree = parser.apply(sentence);
                callback.onSentence(sentenceCount, sentence, tree);
            } else {
                // Too long to parse; still report the sentence so callers
                // can count/display it.
                callback.onSentence(sentenceCount, sentence, null);
            }
            sentenceCount++;
        }
        return sentenceCount;
    }

    /**
     * Reassembles a tokenized sentence with no separators — the usual
     * presentation for Chinese text.
     */
    static public String createOriginalText(List<HasWord> sentence) {
        StringBuilder sb = new StringBuilder();
        for (HasWord hw : sentence) {
            sb.append(hw.word());
        }
        return sb.toString();
    }

    /** Reassembles a tokenized sentence with single spaces between tokens. */
    static public String createSegmentedText(List<HasWord> sentence) {
        StringBuilder sb = new StringBuilder();
        for (HasWord hw : sentence) {
            if (sb.length() > 0) {
                sb.append(' ');
            }
            sb.append(hw.word());
        }
        return sb.toString();
    }

    /**
     * Renders a parse tree's tagged yield as space-separated
     * {@code tag/word} pairs.
     */
    static public String createTaggedText(Tree tree) {
        StringBuilder sb = new StringBuilder();
        for (TaggedWord taggedWord : tree.taggedYield()) {
            if (sb.length() > 0) {
                sb.append(' ');
            }

            sb.append(taggedWord.tag());
            sb.append('/');
            sb.append(taggedWord.word());
        }
        return sb.toString();
    }

}
