package com.nlp.tokenize;

import com.google.gson.Gson;
import com.hankcs.hanlp.corpus.tag.Nature;
import com.hankcs.hanlp.model.crf.CRFLexicalAnalyzer;
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.NLPTokenizer;
import com.samp.util.FilesUtils;
import com.samp.util.HtmlUtils;
import org.apache.commons.lang3.StringUtils;
import org.bouncycastle.jcajce.provider.digest.MD2;

import java.io.IOException;
import java.util.*;

public class HanlpTokenize {

    /**
     * Stop words filtered out of segmentation results.
     * Loaded once from disk at class-load time; NOTE(review): a hard-coded
     * Windows path — consider externalizing to configuration.
     */
    private static final Set<String> STOP_WORDS_SET = new HashSet<>();
    static {
        String path = "D:\\nas\\nlp\\stopwords.txt";
        List<String> stopwords = FilesUtils.getTextsFromPath(path);
        System.out.println("加载停用词个数："+stopwords.size()+",for path="+path);
        STOP_WORDS_SET.addAll(stopwords);
    }

    /**
     * Segments a single text with HanLP's NLP tokenizer, filters stop words,
     * and writes the surviving tokens to {@code outFile} joined by "#".
     *
     * @param content raw text to segment
     * @param outFile destination file path
     * @throws IOException if writing the output file fails
     */
    private static void tokenizeContent(String content, String outFile) throws IOException {
        // Replace spaces with commas before segmentation so the NLP model
        // does not mis-recognize tokens across whitespace.
        content = content.replaceAll(" ", ",");
        List<Term> terms = NLPTokenizer.segment(content);
        List<String> tokens = applyStopWords(terms);
        FilesUtils.saveListToFile(tokens, outFile, "#");
    }

    /**
     * Segments each entry of {@code contents} independently and writes all
     * token lists to {@code outFile}, tokens joined by "#".
     *
     * @param contents lines/documents of raw text to segment
     * @param outFile  destination file path
     * @throws IOException if writing the output file fails
     */
    private static void tokenizeContent(List<String> contents, String outFile) throws IOException {
        List<List<String>> result = new ArrayList<>(contents.size());
        for (String content : contents) {
            // Replace spaces with commas before segmentation so the NLP model
            // does not mis-recognize tokens across whitespace.
            content = content.replaceAll(" ", ",");
            List<Term> terms = NLPTokenizer.segment(content);
            result.add(applyStopWords(terms));
        }
        FilesUtils.saveListsToFile(result, outFile, "#");
    }

    /**
     * Filters segmentation results: drops punctuation (nature {@code w}),
     * stop words, and single-character tokens; trims the survivors.
     *
     * @param terms segmentation output from HanLP
     * @return trimmed words that passed all filters
     */
    private static List<String> applyStopWords(List<Term> terms) {
        List<String> result = new ArrayList<>(terms.size());
        for (Term term : terms) {
            if (term.nature.equals(Nature.w)) {
                continue; // punctuation
            }
            if (STOP_WORDS_SET.contains(term.word)) {
                continue;
            }
            if (term.word.length() == 1) {
                continue; // single characters carry little keyword value
            }
            result.add(term.word.trim());
        }
        return result;
    }

    /**
     * Script entry point. {@code callMethod} selects which corpus directory
     * to process; only the matching branch runs.
     */
    public static void main(String[] args) {
        try {
            String callMethod = "18681";
            if ("zaobao".equals(callMethod)) {
                // Plain-text corpus: each file is segmented line by line.
                String path = "D:\\nas\\nlp\\keywords\\zaobao\\";
                String toPath = "D:\\nas\\nlp\\keywords\\zaobao\\processed\\";
                List<String> fileList = FilesUtils.getFilesFromDir(path);
                for (int i = 0; i < fileList.size(); i++) {
                    String file = path + fileList.get(i);
                    List<String> contentList = FilesUtils.getTextsFromPath(file);
                    System.out.println("process file " + file);
                    tokenizeContent(contentList, toPath + "0" + (i + 1) + ".txt");
                }
            }
            if ("18681".equals(callMethod)) {
                // JSON corpus: each file is a JSON object whose "content"
                // field holds HTML that must be stripped before segmenting.
                String path = "D:\\nas\\nlp\\keywords\\18681\\";
                String toPath = "D:\\nas\\nlp\\keywords\\18681\\processed\\";
                List<String> fileList = FilesUtils.getFilesFromDir(path);
                Gson gson = new Gson();
                for (int i = 0; i < fileList.size(); i++) {
                    String file = path + fileList.get(i);
                    String data = FilesUtils.getTextFromPath(file);
                    Map<?, ?> map = gson.fromJson(data, HashMap.class);
                    String content = (String) map.get("content");
                    if (StringUtils.isNotBlank(content)) {
                        String clearContent = HtmlUtils.removeHtmlTags(content);
                        tokenizeContent(clearContent, toPath + fileList.get(i));
                    }
                    System.out.println("process file " + file);
                }
            }
        } catch (Exception e) {
            // Script-style top-level handler: report and exit normally.
            e.printStackTrace();
        }
    }

}

