package com.xwtec.crawler.service.impl;

import com.xwtec.crawler.service.ReptileService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.*;

@Service
public class ReptileServiceImpl implements ReptileService {

    @Autowired
    CrawlerServiceImpl crawlerService;

    /** Creation time of this service instance; used to report total elapsed seconds. */
    private final long start = System.currentTimeMillis();

    /**
     * Running total of pages crawled across all jobs and instances.
     * NOTE(review): mutable static state shared by every instance/thread — not
     * thread-safe if jobs run concurrently. Kept as-is for backward compatibility;
     * consider AtomicInteger if concurrent jobs are expected.
     */
    private static int count = 0;

    /**
     * Crawls each seed URL and follows discovered links down to three levels,
     * running sensitive-word detection on every fetched page.
     *
     * @param firstUrlList seed URLs; {@code null} or empty list is a no-op
     */
    @Override
    public void threeLevelJob(List<String> firstUrlList) {
        crawlLevels(firstUrlList, 3);
    }

    /**
     * Crawls each seed URL and follows discovered links down to two levels,
     * running sensitive-word detection on every fetched page.
     *
     * @param firstUrlList seed URLs; {@code null} or empty list is a no-op
     */
    @Override
    public void secondLevelJob(List<String> firstUrlList) {
        crawlLevels(firstUrlList, 2);
    }

    /**
     * Shared entry point for the level-limited crawl; replaces the previously
     * duplicated bodies of {@link #threeLevelJob} and {@link #secondLevelJob}.
     *
     * @param seedUrls seed URLs to start from; null/empty tolerated
     * @param maxDepth deepest level to crawl (1 = seeds only)
     */
    private void crawlLevels(List<String> seedUrls, int maxDepth) {
        if (seedUrls == null || seedUrls.isEmpty()) {
            return;
        }
        for (String url : seedUrls) {
            crawlPage(url, url, 1, maxDepth);
        }
    }

    /**
     * Fetches one page, logs timing and link count, runs sensitive-word
     * detection, then recurses into the page's links until {@code maxDepth}.
     *
     * @param pathLabel breadcrumb of URLs from the seed, joined with '>'
     * @param url       the URL to fetch
     * @param level     current depth, 1-based
     * @param maxDepth  deepest level to descend to
     */
    private void crawlPage(String pathLabel, String url, int level, int maxDepth) {
        count++;
        long pageStart = System.currentTimeMillis();
        Map<String, Object> pageMap = crawlerService.getWordAndUrl(url);
        long pageEnd = System.currentTimeMillis();
        // Empty fetch result: skip reporting, detection, and descent
        // (matches the original size() > 0 guard at levels 2/3; the original
        // level-1 code would NPE here — fixed by this guard).
        if (pageMap == null || pageMap.isEmpty()) {
            return;
        }
        // Null-safe typed view of the discovered links; the original raw
        // ((Set) map.get("urls")).size() would NPE on a missing "urls" entry.
        Set<String> childUrls = castUrlSet(pageMap.get("urls"));
        System.out.println("爬取第" + count + "个页面[" + pathLabel + "] 花费时间: " + (pageEnd - pageStart)
                + " 包含[" + childUrls.size() + "]个链接, " + levelLabel(level));
        // 敏感词检测
        checkSensitiveWords(pageMap, count, pathLabel, level);
        if (level < maxDepth) {
            for (String childUrl : childUrls) {
                crawlPage(pathLabel + ">" + childUrl, childUrl, level + 1, maxDepth);
            }
        }
    }

    /** Returns the crawler's "urls" payload as a typed set, or an empty set when absent. */
    @SuppressWarnings("unchecked")
    private static Set<String> castUrlSet(Object value) {
        return value instanceof Set ? (Set<String>) value : Collections.emptySet();
    }

    /** Maps a depth to the Chinese level label used in the progress log lines. */
    private static String levelLabel(int level) {
        switch (level) {
            case 1:
                return "一级页面";
            case 2:
                return "二级页面";
            case 3:
                return "三级页面";
            default:
                return level + "级页面";
        }
    }

    /**
     * Delegates sensitive-word detection for one page and logs cumulative progress.
     *
     * @param map        crawl result for the page (words/urls payload)
     * @param totalCount pages crawled so far, for the progress line
     * @param url        breadcrumb label of the page being checked
     * @param level      depth of the page, 1-based
     */
    private void checkSensitiveWords(Map<String, Object> map, int totalCount, String url, int level) {
        crawlerService.checkSensitiveWords(map, url, level);
        System.out.println("总爬取页面数为[" + totalCount + "], 总共耗时[" + ((System.currentTimeMillis() - start) / 1000) + "]秒");
    }
}
