package com.nlp.keywords;

import com.hankcs.hanlp.corpus.occurrence.TermFrequency;
import com.hankcs.hanlp.mining.word.TermFrequencyCounter;
import com.hankcs.hanlp.tokenizer.NLPTokenizer;
import com.samp.util.FilesUtils;

import java.util.Collection;
import java.util.List;

public class TfDemo {

    /** Default number of keyword / term-frequency entries to report. */
    private static final int DEFAULT_TOP_N = 10;

    /**
     * Extracts the top-{@code topN} keywords from {@code content} with HanLP's
     * {@link TermFrequencyCounter} (segmented by {@link NLPTokenizer#ANALYZER})
     * and prints both the keyword list and the term-frequency entries to stdout.
     *
     * @param content the raw text to analyse
     * @param topN    how many keywords / term-frequency entries to print
     */
    private static void tf(String content, int topN) {
        TermFrequencyCounter counter = new TermFrequencyCounter();
        // Use the NLP tokenizer's analyzer for segmentation rather than the default segmenter.
        counter.setSegment(NLPTokenizer.ANALYZER);
        List<String> keywords = counter.getKeywords(content, topN);
        System.out.println(keywords);
        Collection<TermFrequency> topTerms = counter.top(topN);
        System.out.println(topTerms);
    }

    /** Backward-compatible overload preserving the original top-10 behaviour. */
    private static void tf(String content) {
        tf(content, DEFAULT_TOP_N);
    }

    /**
     * Demo entry point: reads a text file from a fixed local path and runs
     * keyword extraction on its contents.
     */
    public static void main(String[] args) {
        String filePath = "D:\\nas\\nlp\\keywords\\text01.txt";
        String content = FilesUtils.getTextFromPath(filePath);
        try {
            tf(content);
        } catch (Exception e) {
            // Demo boundary: report the failure and exit normally rather than rethrow.
            e.printStackTrace();
        }
    }
}
