package com.wxsh;

import java.io.File;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.io.FileUtils;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.CoreDocument;
import edu.stanford.nlp.pipeline.CoreSentence;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;

/**
 * 从一篇英语文章中提取单词.
 * 整体一起处理.
 * @author wxsh
 */
/**
 * Extracts vocabulary words from an English article, processing the whole text at once.
 *
 * <p>Reads a story file, lemmatizes every token with Stanford CoreNLP, and splits the
 * lemmas into words found in a personal dictionary vs. words not found (or non-alphabetic
 * tokens), writing each set to its own output file.
 *
 * @author wxsh
 */
public class ExtractEnglishWords {

    /** Tokens made up entirely of ASCII letters count as candidate English words. */
    private static final Pattern WORD_PATTERN = Pattern.compile("[a-zA-Z]+");

    public static void main(String[] args) throws Exception {

        // Personal dictionary of known words; HashSet gives O(1) membership checks
        // instead of the O(n) List.contains scan per token.
        List<String> dictWords = FileUtils.readLines(new File("C:\\Users\\Administrator\\git\\EnglishWords\\dict\\my_dict.txt") , "UTF-8");
        Set<String> dict = new HashSet<String>(dictWords);

        File input = new File("D:\\test\\story.txt");
        String text = normalize(FileUtils.readLines(input, "UTF-8"));

        StanfordCoreNLP pipeline = buildPipeline();
        CoreDocument document = new CoreDocument(text);
        pipeline.annotate(document);

        // LinkedHashSet keeps first-seen order while deduplicating lemmas.
        Set<String> destLines = new LinkedHashSet<String>();
        Set<String> notWords = new LinkedHashSet<String>();

        for (CoreSentence sentence : document.sentences()) {
            for (CoreLabel token : sentence.tokens()) {
                String lemma = token.get(CoreAnnotations.LemmaAnnotation.class).trim().toLowerCase();
                // Skip empty and single-character tokens (punctuation, "a", "I", ...).
                if (lemma.length() <= 1) {
                    continue;
                }
                if (WORD_PATTERN.matcher(lemma).matches() && dict.contains(lemma)) {
                    destLines.add(lemma);
                } else {
                    // Non-alphabetic token, or a word missing from the dictionary.
                    notWords.add(lemma);
                    System.out.println(lemma);
                }
            }
        }

        FileUtils.writeLines(new File("D:\\test\\StoryEnglishWords.txt"), "UTF-8", destLines, false);
        FileUtils.writeLines(new File("D:\\test\\NotEnglishWords.txt"), "UTF-8", notWords, false);
    }

    /**
     * Joins the input lines into one string, expanding common slang contractions
     * and collapsing all whitespace runs to single spaces.
     *
     * <p>Bug fix: the replacements were previously anchored with {@code ^...$}, so they
     * only matched lines consisting solely of the slang word and effectively never ran.
     * Word boundaries ({@code \b}) now match the slang anywhere in a line. Likewise,
     * {@code replaceAll("  ", " ")} collapsed only one space pair per position; the
     * {@code \s+} form handles any run length.
     *
     * @param lines raw lines of the input article
     * @return a single normalized text string, trimmed
     */
    static String normalize(List<String> lines) {
        StringBuilder sb = new StringBuilder();
        for (String line : lines) {
            String ln = line.replaceAll("(?i)\\bgonna\\b", " going to ")
                    .replaceAll("(?i)\\bwanna\\b", " want to ")
                    .replaceAll("(?i)\\bgotta\\b", " have got to ");
            sb.append(ln);
            sb.append(" ");
        }
        return sb.toString().replaceAll("\\s+", " ").trim();
    }

    /**
     * Builds a CoreNLP pipeline with tokenization, sentence splitting, POS tagging,
     * and lemmatization.
     *
     * @return a configured {@link StanfordCoreNLP} pipeline
     */
    private static StanfordCoreNLP buildPipeline() {
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize, ssplit, pos, lemma");
        // NOTE(review): coref is not in the annotator list, so this setting is
        // currently inert; kept for when the coref annotator is enabled.
        props.setProperty("coref.algorithm", "neural");
        return new StanfordCoreNLP(props);
    }

}