package com.wxsh;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.io.FileUtils;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.CoreDocument;
import edu.stanford.nlp.pipeline.CoreSentence;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;

/**
 * Processes the input file line by line and lemmatizes each token —
 * the recommended approach for extracting base word forms.
 * @author wxsh
 */
public class ExtractEnglishWords2 {

	// Shared lemmatization pipeline, configured once at class load time.
	private static final StanfordCoreNLP pipeline;

	static {
		// Load the base pipeline configuration from the classpath; the stream
		// is closed via try-with-resources (the original leaked it) and a
		// missing resource no longer triggers an NPE inside props.load.
		Properties props = new Properties();
		try (InputStream in = ExtractEnglishWords2.class.getClassLoader()
				.getResourceAsStream("StanfordCoreNLP.properties")) {
			if (in != null) {
				props.load(in);
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
		// Annotators required for lemmatization (overrides any value loaded above).
		props.setProperty("annotators", "tokenize, ssplit, pos, lemma");
		// Select the neural coreference algorithm (only relevant if the coref
		// annotator is ever enabled via the properties file).
		props.setProperty("coref.algorithm", "neural");
		// Build the pipeline.
		pipeline = new StanfordCoreNLP(props);
	}

	/**
	 * Reads a word list, lemmatizes every token over several passes, and writes
	 * the words found in the dictionary to EnglishWords.txt and the unknown
	 * tokens to NotEnglishWords2.txt.
	 *
	 * @param args unused
	 * @throws Exception if any of the input/output files cannot be read or written
	 */
	public static void main(String[] args) throws Exception {
		// HashSet gives O(1) membership checks; the original used
		// List.contains, which is O(n) per token in the hot loop.
		Set<String> dictWords = new HashSet<>(FileUtils
				.readLines(new File("C:\\Users\\Administrator\\git\\EnglishWords\\dict\\my_dict.txt"), "UTF-8"));
		File input = new File("D:\\test\\StoryEnglishWords.txt");
		List<String> lines = FileUtils.readLines(input, "UTF-8");
		// Insertion-ordered sets: preserve first-seen order while deduplicating.
		Set<String> destLines = new LinkedHashSet<String>();
		Set<String> notWords = new LinkedHashSet<String>();

		// NOTE: lemmatization is applied repeatedly (5 passes) so that
		// multi-step reductions converge to a base form.
		for (int i = 1; i <= 5; i++) {
			for (String text : lines) {
				// Annotate one line as a document.
				CoreDocument document = new CoreDocument(text);
				pipeline.annotate(document);
				for (CoreSentence sentence : document.sentences()) {
					for (CoreLabel token : sentence.tokens()) {
						String lemma = token.get(CoreAnnotations.LemmaAnnotation.class).trim();
						// Skip empty tokens and single characters (punctuation, "a", "I", ...).
						if (lemma.length() <= 1) {
							continue;
						}
						if (dictWords.contains(lemma.toLowerCase())) {
							// Keep both the original casing and the lowercase form.
							destLines.add(lemma);
							destLines.add(lemma.toLowerCase());
						} else {
							notWords.add(lemma);
							System.out.println(lemma);
						}
					}
				}
			}
			// Feed the recognized words back in as input for the next pass.
			lines.clear();
			lines.addAll(destLines);
			destLines.clear();
		}
		destLines.addAll(lines);

		FileUtils.writeLines(new File("D:\\test\\EnglishWords.txt"), "UTF-8", destLines, false);
		FileUtils.writeLines(new File("D:\\test\\NotEnglishWords2.txt"), "UTF-8", notWords, false);
	}

}