package com.example.news_demo;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.*;

import javax.annotation.PostConstruct;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

import com.example.news_demo.entity.ArticleWord;
import com.example.news_demo.entity.Vocabulary;
import com.example.news_demo.mapper.VocabularyMapper;
import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;

@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest
public class ReadFile {

    @Autowired
    private VocabularyMapper vocabularyMapper;

    /**
     * Loads a stop-word list, then reads parallel title/content files line by
     * line (first line of each is a header and is skipped), segments every line
     * with jieba (SEARCH mode), counts each non-stop word of length >= 2, and
     * finally bulk-inserts the global word frequencies into the vocabulary
     * table via {@link VocabularyMapper#insertVocabulary}.
     *
     * <p>NOTE(review): the original method was additionally annotated
     * {@code @PostConstruct}, which makes the Spring TestContext run this heavy
     * job during test-instance initialization as well as through JUnit — the
     * annotation is removed so it runs exactly once per test invocation.
     *
     * @throws IOException if any of the input files cannot be read. Exceptions
     *     now propagate to JUnit; the original caught {@code Exception} and only
     *     printed the stack trace, letting the test pass even on failure.
     */
    @Test
    public void test() throws IOException {
        long startTime = System.currentTimeMillis();
        JiebaSegmenter segmenter = new JiebaSegmenter();
        // Stop words only need membership tests, so a Set replaces the
        // original HashMap<String, String> with identical key/value.
        Set<String> stopWords = new LinkedHashSet<>();
        // word -> total occurrences across all titles and contents
        Map<String, Integer> map = new LinkedHashMap<>();
        // Per-article counters and the per-article word set. The original code
        // that consumed and reset them (batched article_word inserts every 1000
        // articles) was commented out; restore that logic here if per-article
        // frequencies are needed again.
        Map<String, Integer> titleMap = new LinkedHashMap<>();
        Map<String, Integer> contentMap = new LinkedHashMap<>();
        Set<String> set = new LinkedHashSet<>();

        // try-with-resources closes every reader even on failure. The original
        // finally block called close() unconditionally and threw NPE whenever
        // an earlier FileReader constructor failed, masking the real error.
        // UTF-8 is pinned explicitly instead of the platform default charset.
        try (BufferedReader stopBr = newUtf8Reader("C:\\Users\\asus\\Desktop\\stop_words.txt");
             BufferedReader titleBr = newUtf8Reader("C:\\Users\\asus\\Desktop\\title.txt");
             BufferedReader contentBr = newUtf8Reader("C:\\Users\\asus\\Desktop\\content.txt")) {

            // 1. Load the stop-word file, one word per line.
            String s;
            while ((s = stopBr.readLine()) != null) {
                stopWords.add(s);
            }

            // 2. Read titles and contents in lock-step; both files are assumed
            //    to contain the same number of lines — TODO confirm.
            titleBr.readLine();   // skip header line ("title")
            contentBr.readLine(); // skip header line ("content")

            String titleStr;
            while ((titleStr = titleBr.readLine()) != null) {
                // Count words from the title.
                countWords(segmenter, titleStr, stopWords, map, titleMap, set);
                // Count words from the paired content line (null-safe in case
                // the files are out of sync).
                countWords(segmenter, contentBr.readLine(), stopWords, map, contentMap, set);
            }

            // 3. Bulk-insert the accumulated global word frequencies.
            List<Vocabulary> vocabularies = new ArrayList<>(map.size());
            for (Map.Entry<String, Integer> entry : map.entrySet()) {
                vocabularies.add(new Vocabulary(entry.getKey(), entry.getValue()));
            }
            vocabularyMapper.insertVocabulary(vocabularies);
            map.clear();
            System.out.println("----------------------------" + (System.currentTimeMillis() - startTime) + "毫秒");
        }
    }

    /** Opens a buffered reader over the given file path using UTF-8. */
    private static BufferedReader newUtf8Reader(String path) throws IOException {
        return new BufferedReader(
                new InputStreamReader(new FileInputStream(path), StandardCharsets.UTF_8));
    }

    /**
     * Segments {@code text} with jieba (SEARCH mode) and, for every token that
     * is not a stop word and is at least 2 characters long, increments its
     * count in both {@code globalFreq} and {@code sectionFreq} and records it
     * in {@code seen}. A {@code null} text is treated as empty.
     */
    private static void countWords(JiebaSegmenter segmenter,
                                   String text,
                                   Set<String> stopWords,
                                   Map<String, Integer> globalFreq,
                                   Map<String, Integer> sectionFreq,
                                   Set<String> seen) {
        if (text == null) {
            return;
        }
        for (SegToken token : segmenter.process(text, JiebaSegmenter.SegMode.SEARCH)) {
            String word = token.word;
            if (!stopWords.contains(word) && word.length() >= 2) {
                // merge() replaces the original get / null-check / increment /
                // remove / put sequence with a single atomic-per-map update.
                globalFreq.merge(word, 1, Integer::sum);
                sectionFreq.merge(word, 1, Integer::sum);
                seen.add(word);
            }
        }
    }
}
