package com.nlp.keywords;

import com.hankcs.hanlp.mining.word.TfIdfCounter;
import com.hankcs.hanlp.tokenizer.NLPTokenizer;
import com.samp.util.FilesUtils;

import java.util.List;
import java.util.Map;

/**
 * Demo of TF-IDF keyword extraction using HanLP's {@link TfIdfCounter}
 * with the NLP tokenizer as the segmenter.
 */
public class TfIdfDemo {

    /**
     * Extracts the top-10 keywords from a single document and prints them,
     * followed by the term-frequency map and the full per-document TF-IDF table.
     *
     * @param content the raw document text to analyze
     */
    private static void hanlpTfidf(String content) {
        TfIdfCounter tfidf = new TfIdfCounter();
        tfidf.setSegment(NLPTokenizer.ANALYZER);
        tfidf.add(content);
        // Top-10 keywords ranked by TF-IDF within this single-document corpus.
        List<String> keywords = tfidf.getKeywords(content, 10);
        System.out.println(keywords);
        Map<String, Double> tfMap = tfidf.allTf();
        System.out.println(tfMap);
        Map<Object, Map<String, Double>> tfIdfPerDoc = tfidf.compute();
        System.out.println(tfIdfPerDoc);
    }

    /**
     * Builds a TF-IDF model over a corpus of documents, then extracts and
     * prints the top-10 keywords (with their TF-IDF scores) for {@code data},
     * followed by the term-frequency map and the full per-document TF-IDF table.
     *
     * @param contents the corpus: one document per list element
     * @param data     the document whose keywords should be extracted
     */
    private static void hanlpTfidf(List<String> contents, String data) {
        TfIdfCounter tfidf = new TfIdfCounter();
        tfidf.setSegment(NLPTokenizer.ANALYZER);
        for (String content : contents) {
            tfidf.add(content);
        }
        // Keywords scored against IDF statistics gathered from the whole corpus.
        List<Map.Entry<String, Double>> keywords = tfidf.getKeywordsWithTfIdf(data, 10);
        System.out.println(keywords);
        Map<String, Double> tfMap = tfidf.allTf();
        System.out.println(tfMap);
        Map<Object, Map<String, Double>> tfIdfPerDoc = tfidf.compute();
        System.out.println(tfIdfPerDoc);
    }

    /**
     * Entry point: reads a text file and runs corpus-based keyword extraction.
     * The file path may be supplied as the first command-line argument;
     * otherwise the original hard-coded demo path is used.
     *
     * @param args optional: {@code args[0]} = path of the text file to analyze
     */
    public static void main(String[] args) {
        String filePath = args.length > 0 ? args[0] : "D:\\nas\\nlp\\keywords\\text01.txt";
        String content = FilesUtils.getTextFromPath(filePath);
        List<String> contents = FilesUtils.getTextsFromPath(filePath);
        try {
            hanlpTfidf(contents, content);
        } catch (Exception e) {
            // Demo boundary: surface any HanLP/file-handling failure rather than swallow it.
            e.printStackTrace();
        }
    }
}
