package NLP;

import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.StandardTokenizer;
import entity.Word;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class Segmenter {

    /**
     * Segments {@code text} with HanLP's StandardTokenizer and wraps each
     * resulting token as a {@link Word} (surface form + part-of-speech tag).
     * Note: {@code text} should not contain punctuation or whitespace,
     * otherwise those characters will appear in the segmentation result.
     *
     * @param text the text to segment
     * @return the segmented words in original text order; the list may
     *         contain duplicates (no de-duplication is performed here)
     */
    public List<Word> segment(String text) {
        List<Term> termList = StandardTokenizer.segment(text);
        // Presize: exactly one Word is produced per Term.
        List<Word> words = new ArrayList<Word>(termList.size());
        for (Term term : termList) {
            Word word = new Word();
            word.setWord(term.word);
            // nature is the part-of-speech tag assigned by HanLP
            word.setPos(term.nature.toString());
            words.add(word);
        }
        return words;
    }
}
