import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.NLPTokenizer;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

/**
 * 把三国演义文本分词
 */
public class HanLPMain {

    public static void main(String[] args) throws IOException {
        try {
            FileInputStream fis = new FileInputStream("src/main/resources/sanguo.txt");
            InputStreamReader inputReader = new InputStreamReader(fis);
            BufferedReader bf = new BufferedReader(inputReader);
            StringBuilder content = new StringBuilder();
            List<String> wordList = new ArrayList<>();

            String str;
            while((str = bf.readLine()) != null){
                content.append(str);
                // TODO(mjb)： 是否需要添加换行符？
                content.append("\n");
            }

            //关闭的时候只需要关闭最外层的流就行了
            bf.close();
            inputReader.close();
            String termsWords = getTermsWords(HanLP.segment(content.toString()));
//            System.out.println(termsWords);

            // 输出分词后的三国演义文本
            FileOutputStream fos=new FileOutputStream("sanguo_nlp.txt");
            BufferedOutputStream bos=new BufferedOutputStream(fos);

            bos.write(termsWords.getBytes(),0,termsWords.getBytes().length);
            bos.flush();
            bos.close();
        } catch (Exception e) {
            e.printStackTrace();
        }

    }

  public static void printTerms(List<Term> termList) {
        termList.forEach(t -> System.out.print(t.word + " "));
      System.out.print("\n");
  }

  private static String getTermsWords (List<Term> termList) {
        List<String> result = new ArrayList<>();
        termList.forEach(t -> result.add(t.word));
        return String.join(" ",result);
  }
}
