package com.example.news_demo;

import com.example.news_demo.entity.Content;
import com.example.news_demo.service.ArticleService;
import com.example.news_demo.utils.TFIDFAnalyzer;
import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;

import javax.annotation.Resource;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.sql.SQLException;
import java.util.*;

@SpringBootTest
@RunWith(SpringRunner.class)
public class test {

    @Autowired
    private ArticleService articleService;

    /** Stop-word set shared by the test methods; populated lazily by {@link #qu()}. */
    public HashSet<String> stopWordsSet;

    /**
     * Loads the stop-word list from {@code /static/stop_words.txt} on the classpath
     * into {@link #stopWordsSet}. Idempotent: does nothing once the set is loaded.
     */
    @Test
    public void qu() {
        if (stopWordsSet == null) {
            stopWordsSet = new HashSet<>();
            // NOTE(review): the stream is handed straight to loadStopWords; assuming
            // it closes the stream itself — confirm in TFIDFAnalyzer, else this leaks.
            new TFIDFAnalyzer().loadStopWords(stopWordsSet,
                    this.getClass().getResourceAsStream("/static/stop_words.txt"));
        }
    }

    /**
     * Computes per-word document frequencies over all article contents (each word
     * counted at most once per article, stop words and single-character tokens
     * skipped) and appends one "word count" line per word to the output file.
     *
     * @throws IOException if the output file cannot be created or written
     */
    @Test
    public void set_idf() throws IOException {
        // BUG FIX: JUnit 4 creates a fresh instance per test method, so stopWordsSet
        // was null here and the filter below threw a NullPointerException. Make the
        // method self-contained by loading the stop words first.
        qu();

        // BUG FIX: the timer originally started AFTER selectContent() returned, so
        // the "read data" phase always reported 0 seconds. Start it before the query.
        long startTime = System.currentTimeMillis();
        System.out.println("开始计时：");
        List<Content> list = articleService.selectContent();
        System.out.println(list);
        long d = (System.currentTimeMillis() - startTime) / 1000;
        long minute = d / 60;
        long second = d % 60;
        System.out.println("读取数据完成，耗时：" + minute + "分" + second + "秒");

        long startTime2 = System.currentTimeMillis();
        JiebaSegmenter segmenter = new JiebaSegmenter();
        // word -> number of documents containing it (document frequency).
        Map<String, Integer> idfmap2 = new HashMap<>();
        for (Content article : list) {
            List<SegToken> tokenList =
                    segmenter.process(article.getContent(), JiebaSegmenter.SegMode.SEARCH);
            // Distinct words of this document: each word counts at most once per article.
            Set<String> wordsInDoc = new HashSet<>();
            for (SegToken token : tokenList) {
                // Skip stop words and single-character tokens.
                if (!stopWordsSet.contains(token.word) && token.word.length() > 1) {
                    wordsInDoc.add(token.word);
                }
            }
            for (String word : wordsInDoc) {
                // Idiomatic replacement for the containsKey/get/put increment dance.
                idfmap2.merge(word, 1, Integer::sum);
            }
        }
        d = (System.currentTimeMillis() - startTime2) / 1000;
        minute = d / 60;
        second = d % 60;
        System.out.println("分词1000条完成，耗时：" + minute + "分" + second + "秒");

        Path path = Paths.get("C:\\Users\\asus\\Desktop\\1.txt");
        if (path.getParent() != null) {
            Files.createDirectories(path.getParent());
        }
        // BUG FIX: the original FileWriter/BufferedWriter pair leaked on any write
        // failure; try-with-resources guarantees the writer is closed. UTF-8 is made
        // explicit instead of relying on the platform default charset.
        try (BufferedWriter bw = Files.newBufferedWriter(path, StandardCharsets.UTF_8,
                StandardOpenOption.CREATE, StandardOpenOption.APPEND)) {
            for (Map.Entry<String, Integer> entry : idfmap2.entrySet()) {
                // The count is widened to double on purpose to keep the original
                // "word 5.0" line format — downstream readers may depend on it.
                double value = entry.getValue();
                bw.write(entry.getKey() + " " + value);
                bw.newLine();
            }
        }
    }

    /**
     * Ad-hoc entry point for running the frequency export outside JUnit.
     * NOTE(review): articleService is only injected when this class runs under the
     * Spring test runner; from a plain main() it is null and set_idf() will fail —
     * prefer running set_idf() as a JUnit test.
     */
    public static void main(String[] args) throws SQLException, IOException {
        // BUG FIX: the original used two separate instances, so the stop words
        // loaded by qu() on the first were gone when set_idf() ran on the second.
        com.example.news_demo.test runner = new com.example.news_demo.test();
        runner.qu();
        runner.set_idf();
    }
}
