package com.ycy.analyzer.ansj;

import org.ansj.domain.Result;
import org.ansj.domain.Term;
import org.ansj.library.AmbiguityLibrary;
import org.ansj.library.DicLibrary;
import org.ansj.recognition.impl.NatureRecognition;
import org.ansj.splitWord.analysis.DicAnalysis;
import org.ansj.splitWord.analysis.NlpAnalysis;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.junit.Before;
import org.junit.Test;
import org.nlpcn.commons.lang.tire.domain.Value;
import org.nlpcn.commons.lang.tire.library.Library;

import java.net.URL;
import java.util.Arrays;
import java.util.List;

/**
 * @Author: ycy
 * @CreateDate: 2019/6/18 12:33
 */
/**
 * Demo of the Ansj Chinese word-segmentation library: standard analysis,
 * user-dictionary-priority analysis, ambiguity-dictionary handling, and
 * part-of-speech tagging.
 *
 * <p>NOTE(review): the public method names (including the misspelled
 * {@code printAnlayzerRes}) are kept unchanged to preserve the external interface.
 */
public class AnsjAnalyzerDemo {

  /**
   * Loads the ambiguity dictionary from the classpath and seeds the default
   * user dictionary before each test.
   */
  @Before
  public void before() {
    // Load the ambiguity dictionary; fail fast with a clear message instead of
    // an opaque NPE from getPath() if the resource is missing.
    URL ansjURL = this.getClass().getClassLoader().getResource("ansj/library/ambiguity.dic");
    if (ansjURL == null) {
      throw new IllegalStateException(
          "ambiguity dictionary not found on classpath: ansj/library/ambiguity.dic");
    }
    AmbiguityLibrary.put(AmbiguityLibrary.DEFAULT, ansjURL.getPath());

    // Add a custom word to the default user dictionary.
    DicLibrary.insert(DicLibrary.DEFAULT, "苏宁易付宝");

    // Insert "的" as a stop word. The original code inserted it twice with
    // frequencies 11 and 10000; the second insert overwrites the first, so only
    // the final one is kept here.
    DicLibrary.insert(DicLibrary.DEFAULT, "的", "stop", 10000);
  }

  /** Standard (ToAnalysis) segmentation of a user-dictionary word. */
  @Test
  public void parse() {
    String str = "苏宁易付宝";
    Result parse = ToAnalysis.parse(str);
    printAnlayzerRes(parse);
  }

  /**
   * DicAnalysis: segmentation that gives priority to the user dictionary.
   * Expected: 苏宁易付宝/userDefine
   */
  @Test
  public void parse1() {
    String str = "苏宁易付宝的一个";
    Result parse = DicAnalysis.parse(str);
    printAnlayzerRes(parse);
  }

  /**
   * Ambiguity-dictionary demo: shows segmentation before and after inserting an
   * ambiguity entry for "川府办发".
   */
  @Test
  public void printAnlayzerRes() {
    Result result = NlpAnalysis.parse("据说川府办发的发文很厉害");
    // Expected: 据说/v,川府/nw,办发/j,的/u,发文/v,很/d,厉害/a
    System.out.println(result);

    Value value = new Value("川府办发", "川府办", "nw", "发", "j");
    // BUGFIX: insert the ambiguity entry BEFORE parsing. The original code
    // parsed first and inserted afterwards, so the entry had no effect on result1.
    Library.insertWord(AmbiguityLibrary.get(), value);
    Result result1 = ToAnalysis.parse("据说川府办发的发文很厉害");
    System.out.println(result1);
  }

  /** Part-of-speech tagging produced by standard analysis. */
  @Test
  public void cixing() {
    Result parse = ToAnalysis.parse("Ansj中文分词是一个真正的ict的实现.并且加入了自己的一些数据结构和算法的分词.实现了高效率和高准确率的完美结合!");
    System.out.println(parse.getTerms()); // POS-tagged terms
  }

  /** Part-of-speech tagging of pre-segmented (non-Ansj) tokens. */
  @Test
  public void cixing2() {
    String[] strs = {"对", "非", "ansj", "的", "分词", "结果", "进行", "词性", "标注"};
    List<String> lists = Arrays.asList(strs);
    List<Term> recognition = new NatureRecognition().recognition(lists, 0);
    System.out.println(recognition);
  }

  /**
   * Prints each term of an analysis result on its own line.
   *
   * @param parse the segmentation result to print
   */
  public void printAnlayzerRes(Result parse) {
    for (Term term : parse.getTerms()) {
      System.out.println(term);
    }
  }

}
