package phoenics.app.service.impl;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.CoreDictionary;
import com.hankcs.hanlp.dictionary.CoreDictionary.Attribute;
import com.hankcs.hanlp.dictionary.CustomDictionary;
import com.hankcs.hanlp.seg.NShort.NShortSegment;
import com.hankcs.hanlp.seg.common.Term;

import phoenics.app.service.AppKeyWords;

/**
 * {@link AppKeyWords} implementation backed by HanLP's NShort segmenter.
 *
 * <p>Contract notes (preserved from the original implementation): every method
 * returns {@code null} (not an empty list) when the input text or word list is
 * null/blank — existing callers may depend on that, so it is kept as-is.
 */
public class HanlpAppKeyWords implements AppKeyWords{
	private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HanlpAppKeyWords.class);

	/**
	 * Segments {@code text} with an index-mode NShort segmenter and returns the words.
	 *
	 * @param text   text to segment; {@code null}/blank yields {@code null}
	 * @param ispeat when {@code true} repeated words are kept; when {@code false}
	 *               duplicates are removed (first occurrence order preserved)
	 * @return segmented words, or {@code null} for blank input
	 */
	@Override
	public List<String> takeKeyWords(String text, boolean ispeat) {
		if (text == null || text.trim().length() == 0) {
			return null;
		}
		NShortSegment segment = new NShortSegment();
		// Index mode also emits sub-word tokens; offsets are enabled for it to work.
		segment.enableOffset(true).enableIndexMode(true);
		List<String> words = segment.seg(text).stream()
				.map(term -> term.word)
				.collect(Collectors.toList());
		return ispeat ? words : words.stream().distinct().collect(Collectors.toList());
	}

	/**
	 * Segments {@code text} with both a plain and an index-mode NShort segmenter,
	 * concatenates both token lists, and appends every entry of {@code words} that
	 * occurs (case-insensitively) as a substring of {@code text}.
	 *
	 * <p>NOTE(review): running both segmenters means most tokens appear twice when
	 * {@code ispeat} is {@code true}; only the {@code false} branch de-duplicates.
	 * This mirrors the original behavior — confirm whether the duplication is intended.
	 *
	 * @param text   text to segment; {@code null}/blank yields {@code null}
	 * @param words  extra candidate words matched as case-insensitive substrings;
	 *               {@code null}/empty yields {@code null}
	 * @param ispeat when {@code true} duplicates are kept; when {@code false} the
	 *               combined list is de-duplicated
	 * @return combined keyword list, or {@code null} for blank/empty input or when
	 *         segmentation produced no tokens
	 */
	@Override
	public List<String> takeKeyWords(final String text, List<String> words, boolean ispeat) {
		if (text == null || text.trim().length() == 0) {
			return null;
		}
		if (words == null || words.isEmpty()) {
			return null;
		}
		// Hoist the loop-invariant lowercasing of the whole text out of the filter.
		final String loweredText = text.toLowerCase();
		List<String> matchedWords = words.stream()
				.filter(w -> loweredText.contains(w.toLowerCase()))
				.collect(Collectors.toList());

		NShortSegment plainSegment = new NShortSegment();
		NShortSegment indexSegment = new NShortSegment();
		indexSegment.enableOffset(true).enableIndexMode(true);

		// Collect into a mutable list so the index-mode tokens can be appended.
		List<String> keyWords = plainSegment.seg(text).stream()
				.map(term -> term.word)
				.collect(Collectors.toList());
		keyWords.addAll(indexSegment.seg(text).stream()
				.map(term -> term.word)
				.collect(Collectors.toList()));
		if (keyWords.isEmpty()) {
			return null;
		}
		keyWords.addAll(matchedWords);
		return ispeat ? keyWords : keyWords.stream().distinct().collect(Collectors.toList());
	}

	/**
	 * Returns {@code true} when any entry of {@code words} equals (exactly) one of
	 * the words HanLP segments {@code text} into.
	 *
	 * @param text  text to segment; {@code null}/blank yields {@code false}
	 * @param words candidate words; {@code null}/empty yields {@code false}
	 */
	@Override
	public boolean hasKeyWord(String text, List<String> words) {
		if (text == null || text.trim().length() == 0) {
			return false;
		}
		if (words == null || words.isEmpty()) {
			return false;
		}
		// Build a set of segmented words once instead of re-scanning the term
		// list for every candidate word (was O(terms * words)).
		Set<String> termWords = HanLP.segment(text).stream()
				.map(term -> term.word)
				.collect(Collectors.toCollection(HashSet::new));
		for (String word : words) {
			if (termWords.contains(word)) {
				return true;
			}
		}
		return false;
	}

	/**
	 * Returns {@code true} when {@code word} equals (exactly) one of the words
	 * HanLP segments {@code text} into.
	 *
	 * @param text text to segment; {@code null}/blank yields {@code false}
	 * @param word candidate word; {@code null}/blank yields {@code false}
	 */
	@Override
	public boolean hasKeyWord(String text, String word) {
		if (text == null || text.trim().length() == 0) {
			return false;
		}
		if (word == null || word.trim().length() == 0) {
			return false;
		}
		return HanLP.segment(text).stream().anyMatch(term -> term.word.equals(word));
	}

	/**
	 * Ad-hoc manual check: registers "!5" as a custom dictionary entry and prints
	 * the segmentation of a sample sentence with HanLP debug output enabled.
	 */
	public static void main(String[] args) {
		HanLP.Config.DEBUG = true;
		Attribute attribute = CoreDictionary.Attribute.create("nz 1024");
		CoreDictionary.trie.set("!5", attribute);
		CustomDictionary.insert("!5", "nz 1024");
		AppKeyWords appKeyWords = new HanlpAppKeyWords();
		List<String> dds = appKeyWords.takeKeyWords("那么如何建立节点之间,!51333找到一个词A的后续", true);
		System.out.println(dds);
	}
}
