package cn.ac.jaas.ljx.wordsegmentation.interfaces.impl;

import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import cn.ac.jaas.ljx.wordsegmentation.domain.Document;
import cn.ac.jaas.ljx.wordsegmentation.domain.Word;
import cn.ac.jaas.ljx.wordsegmentation.interfaces.Segmenter;
import cn.ac.jaas.ljx.wordsegmentation.util.WordCountUtil;
import org.nlpcn.commons.lang.util.StringUtil;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;

/**
 * {@link Segmenter} backed by the Stanford CoreNLP Chinese (CTB) pipeline.
 *
 * <p>The pipeline is configured from {@code StanfordCoreNLP-chinese-ctb.properties}
 * on the working directory and is expensive to build (it loads large model files),
 * so it is created lazily once and shared by all instances and calls.
 */
public class StanfordSegmenter implements Segmenter {

	/** Configuration file loaded when the pipeline is first built. */
	private static final String PROPERTIES_FILE = "StanfordCoreNLP-chinese-ctb.properties";

	/** Lazily-initialized shared pipeline; guarded by double-checked locking. */
	private static volatile StanfordCoreNLP pipeline;

	/**
	 * Returns the shared CoreNLP pipeline, building it on first use.
	 *
	 * @throws IllegalStateException if the properties file cannot be read —
	 *         failing fast here is preferable to letting CoreNLP fail later
	 *         with an empty configuration (the original code swallowed the error).
	 */
	private static StanfordCoreNLP pipeline() {
		StanfordCoreNLP local = pipeline;
		if (local == null) {
			synchronized (StanfordSegmenter.class) {
				local = pipeline;
				if (local == null) {
					Properties properties = new Properties();
					// try-with-resources closes the stream even when load() throws,
					// fixing the file-handle leak in the original implementation.
					try (FileInputStream in = new FileInputStream(PROPERTIES_FILE)) {
						properties.load(in);
					} catch (Exception e) {
						throw new IllegalStateException(
								"Failed to load " + PROPERTIES_FILE, e);
					}
					local = new StanfordCoreNLP(properties);
					pipeline = local;
				}
			}
		}
		return local;
	}

	/**
	 * Segments a single document's title and digest into counted words.
	 *
	 * @param document source document; its title and digest are concatenated
	 * @return counted word list, or an empty list when the text is blank
	 */
	@Override
	public List<Word> segment(Document document) {
		String text = document.getTitle() + document.getDigest();
		if (StringUtil.isBlank(text)) {
			return new ArrayList<Word>();
		}
		return tokenizeAndCount(text);
	}

	/**
	 * Segments the concatenated title and digest of every document in the batch.
	 *
	 * @param documents documents to segment together as one text
	 * @return counted word list, or an empty list when the combined text is blank
	 */
	public List<Word> segment(List<Document> documents) {
		StringBuilder sb = new StringBuilder();
		for (Document document : documents) {
			// Chained appends avoid building an intermediate concatenated String.
			sb.append(document.getTitle()).append(document.getDigest());
		}
		String text = sb.toString();
		if (StringUtil.isBlank(text)) {
			return new ArrayList<Word>();
		}
		return tokenizeAndCount(text);
	}

	/**
	 * Runs the shared pipeline over {@code text} and aggregates token counts.
	 * Extracted to remove the duplication between the two segment overloads.
	 */
	private static List<Word> tokenizeAndCount(String text) {
		Annotation annotation = new Annotation(text);
		pipeline().annotate(annotation);
		List<Word> wordsNoCount = new ArrayList<>();
		for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
			for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
				wordsNoCount.add(new Word(token.get(CoreAnnotations.TextAnnotation.class)));
			}
		}
		List<Word> wordsCounted = new ArrayList<>();
		WordCountUtil.wordCount(wordsNoCount, wordsCounted);
		return wordsCounted;
	}

}
