package hanlp;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.Segment;
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.NLPTokenizer;

import java.util.List;

/**
 * <p>分词器相关：</p>
 * <p>中科院计算所NLPIR http://ictclas.nlpir.org/nlpir/ </p>
 * <p>ansj分词器 https://github.com/NLPchina/ansj_seg </p>
 * <p>哈工大的LTP https://github.com/HIT-SCIR/ltp </p>
 * <p>清华大学THULAC https://github.com/thunlp/THULAC </p>
 * <p>斯坦福分词器 https://nlp.stanford.edu/software/segmenter.shtml </p>
 * <p>Hanlp分词器 https://github.com/hankcs/HanLP </p>
 * <p>结巴分词 https://github.com/yanyiwu/cppjieba </p>
 * <p>KCWS分词器(字嵌入+Bi-LSTM+CRF) https://github.com/koth/kcws </p>
 * <p>ZPar https://github.com/frcchang/zpar/releases </p>
 * <p>IKAnalyzer https://github.com/wks/ik-analyzer </p>
 *
 *
 * <p>分词器评估报告：https://github.com/ysc/cws_evaluation </p>
 */
public class TestHanlp {

	/**
	 * Demo entry point: segments a sample Chinese address string using HanLP
	 * with the custom dictionary and organization-name recognition enabled,
	 * then prints the resulting term list to stdout.
	 */
	public static void main(String[] args) {
		// Build the segmenter step by step rather than as one chained expression.
		Segment tokenizer = HanLP.newSegment();
		tokenizer.enableCustomDictionary(true);
		tokenizer.enableOrganizationRecognize(true);

		// Sample input: province + city + a postal-code-like number.
		List<Term> tokens = tokenizer.seg("湖北武汉430070");
		System.out.println(tokens);
	}
}
