package com.wxsh;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import org.apache.commons.io.FileUtils;

import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.CoreDocument;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;

/**
 * Stanford中文分词---推荐
 * @author wxsh
 */
/**
 * Chinese word segmentation using Stanford CoreNLP (recommended).
 *
 * <p>Loads the pipeline configuration from
 * {@code StanfordCoreNLP-chinese.properties} on the classpath once, at class
 * load time, and exposes {@link #tokenize(String)} for per-line segmentation.
 *
 * @author wxsh
 */
public class ChineseTokenize {

	/** Shared pipeline; Stanford model loading is expensive, so build it once. */
	private static final StanfordCoreNLP pipeline;

	static {
		Properties props = new Properties();
		// try-with-resources so the classpath stream is always closed
		try (InputStream in = ChineseTokenize.class.getClassLoader()
				.getResourceAsStream("StanfordCoreNLP-chinese.properties")) {
			if (in == null) {
				// getResourceAsStream returns null (not an exception) when missing
				throw new IllegalStateException(
						"StanfordCoreNLP-chinese.properties not found on classpath");
			}
			props.load(in);
		} catch (IOException e) {
			// Fail fast instead of swallowing: a silently-null pipeline would
			// only surface later as an obscure NullPointerException.
			throw new ExceptionInInitializerError(e);
		}
		pipeline = new StanfordCoreNLP(props);
	}

	public static void main(String[] args) throws Exception {
		stanfordTokenize();
	}

	/**
	 * Tokenizes {@code D:\story_split.txt} line by line and writes the result
	 * to {@code D:\stanford_tokenize.txt} (both UTF-8). Kept for backward
	 * compatibility; delegates to the parameterized overload.
	 *
	 * @throws Exception if reading or writing either file fails
	 */
	public static void stanfordTokenize() throws Exception {
		stanfordTokenize("D:\\story_split.txt", "D:\\stanford_tokenize.txt");
	}

	/**
	 * Tokenizes every line of {@code inputPath} and writes the segmented lines
	 * to {@code outputPath}, overwriting any existing file. Both files are
	 * read/written as UTF-8.
	 *
	 * @param inputPath  path of the UTF-8 text file to segment, one sentence per line
	 * @param outputPath path of the UTF-8 output file (truncated, not appended)
	 * @throws Exception if reading or writing either file fails
	 */
	public static void stanfordTokenize(String inputPath, String outputPath) throws Exception {
		List<String> lines = FileUtils.readLines(new File(inputPath), "UTF-8");
		// Presize: one output line per input line.
		List<String> destLines = new ArrayList<String>(lines.size());
		for (String line : lines) {
			destLines.add(tokenize(line));
		}
		FileUtils.writeLines(new File(outputPath), "UTF-8", destLines, false);
	}

	/**
	 * Segments {@code text} into tokens joined by {@code Constants.SEPARATOR}.
	 *
	 * <p>Note: a separator is appended after every token, including the last
	 * one — preserved deliberately so existing output files stay identical.
	 * (Part-of-speech tags are available via {@code token.tag()} if needed.)
	 *
	 * @param text the raw Chinese text to segment
	 * @return the tokens concatenated with a trailing separator after each
	 */
	public static String tokenize(String text) {
		CoreDocument document = new CoreDocument(text);
		// Run the full annotation pipeline (tokenize/segment etc.).
		pipeline.annotate(document);
		StringBuilder sb = new StringBuilder();
		for (CoreLabel token : document.tokens()) {
			sb.append(token.word()).append(Constants.SEPARATOR);
		}
		return sb.toString();
	}

}