package com.kongchengji.spider.station.process.processImpl;

import com.kongchengji.spider.station.constant.Constant;
import com.kongchengji.spider.station.dao.PicSpiderDao;
import com.kongchengji.spider.station.dao.ThumbnailDao;
import com.kongchengji.spider.station.domain.PicSpiderDO;
import com.kongchengji.spider.station.domain.ThumbnailDO;
import com.kongchengji.spider.station.extention.MySpiderMonitor;
import com.kongchengji.spider.station.extention.MySpiderStatus;
import com.kongchengji.spider.util.ManageImgSrcUtil;
import com.kongchengji.spider.util.RemoveNullUtil;
import com.kongchengji.spider.util.RequestTools;
import com.kongchengji.spider.util.SpringContextUtil;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.selector.Html;


import java.util.*;
import java.util.regex.Pattern;

/**
 * 爬虫逻辑处理
 */
/**
 * Crawl-logic processor for one picture station.
 *
 * <p>For every fetched page it extracts thumbnails, large-image URLs, category
 * names and (optionally) the gallery title, following pagination links inside a
 * gallery when configured to. Extraction rules (XPath/regex) come from the
 * database via {@link PicSpiderDao}, keyed by the station id.
 */
public class StationProcess implements PageProcessor {

    // Single shared logger — the original built a new Logger on every manage() call.
    private static final Logger LOGGER = LoggerFactory.getLogger(StationProcess.class);

    /** Descriptor of the station being crawled. */
    private final Constant.Station station;
    /** Politeness delay between requests, in milliseconds. */
    private int sleepTime = 0;
    /** Whether the gallery title can be read directly from the page. */
    private final boolean isDirectlyGetTitle;
    /** Whether pagination links discovered inside a gallery are followed recursively. */
    private final boolean isFindPage;
    private final ThumbnailDao thumbnailDao;
    private final PicSpiderDao picSpiderDao;
    /** Extraction rules for this station; {@code null} if the station is unknown. */
    private final PicSpiderDO picSpiderDO;

    /**
     * Crawler settings. Field initializers run before any constructor body, so this
     * is always built with {@code sleepTime == 0}; {@link #setSleepTime(int)} keeps
     * it in sync afterwards.
     */
    private final Site site = Site
            .me()
            .setSleepTime(sleepTime)
            .setRetryTimes(3)
            .setUserAgent("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31");

    /**
     * @param station            station to crawl
     * @param isDirectlyGetTitle whether the gallery title is extracted directly from the page
     * @param isFindpage         whether pagination links are followed recursively
     */
    public StationProcess(Constant.Station station, boolean isDirectlyGetTitle, boolean isFindpage) {
        this.isDirectlyGetTitle = isDirectlyGetTitle;
        this.isFindPage = isFindpage;
        this.station = station;
        thumbnailDao = SpringContextUtil.getBean(ThumbnailDao.class);
        picSpiderDao = SpringContextUtil.getBean(PicSpiderDao.class);
        picSpiderDO = picSpiderDao.get(station.getId());
    }

    /**
     * Core extraction routine for one fetched page: thumbnails, image URLs,
     * categories, optional title, optional gallery pagination.
     *
     * @param page               page fetched by WebMagic
     * @param isDirectlyGetTitle whether to extract the title from this page
     * @throws Exception if no spider configuration exists for the station
     */
    public void manage(Page page, boolean isDirectlyGetTitle) throws Exception {
        if (picSpiderDO == null) {
            throw new Exception("不存在该站点");
        }
        // First-crawl throttling check — intentionally disabled.
//        isFirstCraw(picSpiderDO);
        // Collect thumbnails when the station defines a thumbnail extraction rule.
        if (StringUtils.isNotBlank(picSpiderDO.getThumbnailXsoup())) {
            getThumbnail(page, picSpiderDO);
        }
        List<String> urlContentList = page.getHtml().xpath(picSpiderDO.getPicUrlXsoup()).all();
        // Station 5 (天堂图片) legitimately yields no large images at this point; every
        // other station treats an empty result as "not a gallery page" and keeps crawling.
        if (urlContentList.isEmpty() && station.getId() != 5) {
            LOGGER.error("无法获取大图");
            addTargetUrl(page, picSpiderDO);
            return;
        }
        // Prepend the configured URL prefix to relative image links, if any.
        String urlPrefix = picSpiderDO.getUrlPrefix();
        if (StringUtils.isNotBlank(urlPrefix)) {
            updateUrlContentList(urlContentList, urlPrefix);
        }
        List<String> typeList = new ArrayList<>();
        if (StringUtils.isNotBlank(picSpiderDO.getTypeXsoup())) {
            typeList.addAll(page.getHtml().xpath(picSpiderDO.getTypeXsoup()).all());
            // Drop empty category entries.
            typeList = RemoveNullUtil.removeNull(typeList);
        } else {
            // No category rule configured: default to the home-page category.
            typeList.add("首页");
        }
        if (isDirectlyGetTitle) {
            String title = page.getHtml().xpath(picSpiderDO.getTitleXsoup()).toString();
            if (StringUtils.isNotBlank(title)) {
                // Strip the configured title suffix (e.g. a trailing site name).
                if (StringUtils.isNotBlank(picSpiderDO.getTitleSuffixRegex())) {
                    title = StringUtils.trim(title.replaceAll(picSpiderDO.getTitleSuffixRegex(), ""));
                }
                page.putField("title", title);
            } else {
                LOGGER.error("无法获取标题");
                addTargetUrl(page, picSpiderDO);
                return;
            }
        }

        if (typeList.isEmpty()) {
            LOGGER.error("typeList为空");
            // Guards against matching a page that is not a final gallery page.
            addTargetUrl(page, picSpiderDO);
            return;
        }
        // Paginated galleries: walk the "next page" links and collect their images too.
        if (StringUtils.isNotBlank(picSpiderDO.getNextPicXsoup())) {
            getNextPage(urlContentList, page);
        }
        page.putField("typeList", typeList);
        // Drop duplicate image links before handing the result to the pipeline.
        excluderRepeatList(urlContentList);
        page.putField("urlContentList", urlContentList);
        // The page URL doubles as the gallery's unique key.
        page.putField("thumbnail", page.getUrl().toString());
    }


    @Override
    public void process(Page page) {
        try {
            manage(page, isDirectlyGetTitle);
        } catch (Exception e) {
            // Route failures through the logger instead of printStackTrace().
            LOGGER.error("处理页面失败: {}", page.getUrl(), e);
        }
    }

    @Override
    public Site getSite() {
        return site;
    }

    /**
     * Updates the politeness delay. The value is propagated to the already-built
     * {@link Site} — the original setter only changed the field, which the Site
     * never read again, making the setter a no-op.
     */
    public void setSleepTime(int sleepTime) {
        this.sleepTime = sleepTime;
        site.setSleepTime(sleepTime);
    }

    /**
     * Ensures every element of {@code urlContentList} carries {@code urlPrefix},
     * preserving the original ordering: already-prefixed links first, newly
     * prefixed links after.
     *
     * @param urlContentList links to fix up, modified in place
     * @param urlPrefix      prefix to prepend to links that lack it
     */
    private void updateUrlContentList(List<String> urlContentList, String urlPrefix) {
        List<String> rebuilt = new ArrayList<>(urlContentList.size());
        // Links that already contain the prefix keep their position at the front.
        urlContentList.stream().filter(url -> url.contains(urlPrefix)).forEach(rebuilt::add);
        // Remaining links get the prefix prepended and follow after.
        urlContentList.stream().filter(url -> !url.contains(urlPrefix)).forEach(url -> rebuilt.add(urlPrefix + url));
        urlContentList.clear();
        urlContentList.addAll(rebuilt);
    }

    /**
     * Removes duplicates from {@code list} in place, keeping the first occurrence
     * of each element in its original order.
     *
     * @param list list possibly containing duplicates
     * @return the same list instance, deduplicated
     */
    private List<String> excluderRepeatList(List<String> list) {
        // LinkedHashSet preserves first-seen order while dropping repeats.
        Set<String> seen = new LinkedHashSet<>(list);
        list.clear();
        list.addAll(seen);
        return list;
    }

    /**
     * Removes every URL matching {@code rule} from {@code urlList}.
     *
     * @param urlList links to filter, modified in place
     * @param rule    exclusion regex
     * @return the same list instance, filtered
     */
    private List<String> excludeByRule(List<String> urlList, String rule) {
        // Compile once — the original compiled the same pattern for every element.
        Pattern pattern = Pattern.compile(rule);
        urlList.removeIf(url -> pattern.matcher(url).find());
        return urlList;
    }

    /**
     * Finds new pagination links on a fetched pagination page and appends the
     * unseen ones to the work list.
     *
     * @param oldList       all pagination links discovered so far (grown in place)
     * @param urlContentSet set of links already queued, used for dedup
     * @param html          HTML of the pagination page just fetched
     */
    private void managePage(List<String> oldList, HashSet<String> urlContentSet, Html html) {
        List<String> newList;
        if (StringUtils.isNotBlank(picSpiderDO.getNextPicRegex())) {
            newList = html.xpath(picSpiderDO.getNextPicXsoup()).links().regex(picSpiderDO.getNextPicRegex()).all();
        } else {
            newList = html.xpath(picSpiderDO.getNextPicXsoup()).links().all();
        }
        excluderRepeatList(newList);
        for (String url : newList) {
            // Set.add returns false for duplicates, so each link is queued exactly once.
            if (urlContentSet.add(url)) {
                oldList.add(url);
            }
        }
    }

    /**
     * Fetches every pagination page of the current gallery and accumulates the
     * image links it finds into {@code urlContentList}.
     *
     * @param urlContentList image links collected so far (grown in place)
     * @param page           the gallery's first page
     */
    private void getNextPage(List<String> urlContentList, Page page) {
        List<String> list;
        if (StringUtils.isNotBlank(picSpiderDO.getNextPicRegex())) {
            list = page.getHtml().xpath(picSpiderDO.getNextPicXsoup()).links().regex(picSpiderDO.getNextPicRegex()).all();
        } else {
            // (The original also had a dead toString() call here; removed.)
            list = page.getHtml().xpath(picSpiderDO.getNextPicXsoup()).links().all();
        }
        HashSet<String> urlContentSet = new HashSet<>(list);
        list = excluderRepeatList(list);
        if (list.isEmpty()) {
            return;
        }
        // The total page count of one gallery cannot change mid-iteration, so parse it
        // once — the original re-parsed it on every loop pass. -1 means "no limit".
        int totalPage = -1;
        if (StringUtils.isNotBlank(picSpiderDO.getTotalPageXsoup())) {
            totalPage = Integer.parseInt(page.getHtml().xpath(picSpiderDO.getTotalPageXsoup()).toString());
        }
        for (int index = 0; index < list.size(); index++) {
            if (totalPage >= 0 && index + 1 == totalPage) {
                break;
            }
            String url = list.get(index);
            try {
                // Skip in-page anchors and anything that is not an absolute http(s) URL.
                if (url.contains("#") || !url.startsWith("http")) {
                    continue;
                }
                long startTime = System.currentTimeMillis();
                String htmlContent = RequestTools.processHttpRequest(url, "get", new HashMap<>());
                Html html = new Html(htmlContent, url);
                urlContentList.addAll(html.xpath(picSpiderDO.getPicUrlXsoup()).all());
                // Fix up relative image links from this pagination page as well.
                String urlPrefix = picSpiderDO.getUrlPrefix();
                if (StringUtils.isNotBlank(urlPrefix)) {
                    updateUrlContentList(urlContentList, urlPrefix);
                }
                if (isFindPage) {
                    // Discover further pagination links from the page just fetched.
                    managePage(list, urlContentSet, html);
                }
                // Timing was previously printed via System.out; demoted to debug logging.
                LOGGER.debug("分页抓取耗时 {} ms", System.currentTimeMillis() - startTime);
            } catch (Exception e) {
                // Best-effort: one bad pagination link must not abort the whole gallery.
                LOGGER.error("抓取分页失败: {}", url, e);
            }
        }
    }

    /**
     * Queues follow-up links matched by the station's append regex — minus any
     * matching the exclude regex — and skips pipeline output for the current
     * (non-gallery) page.
     *
     * @param page        page whose links are harvested
     * @param picSpiderDO extraction rules for the station
     */
    private void addTargetUrl(Page page, PicSpiderDO picSpiderDO) {
        if (StringUtils.isNotBlank(picSpiderDO.getExcludeRegex())) {
            page.addTargetRequests(excludeByRule(
                    page.getHtml().links().regex(picSpiderDO.getAppendRegex()).all(),
                    picSpiderDO.getExcludeRegex()));
        } else {
            page.addTargetRequests(page.getHtml().links().regex(picSpiderDO.getAppendRegex()).all());
        }
        page.setSkip(true);
    }

    /**
     * Extracts thumbnail image/link pairs from the page and persists the ones not
     * yet stored. Thumbnails without an absolute image URL are skipped.
     *
     * @param page        page to scan for thumbnail boxes
     * @param picSpiderDO extraction rules for the station
     */
    private void getThumbnail(Page page, PicSpiderDO picSpiderDO) {
        List<String> picBoxs = page.getHtml().xpath(picSpiderDO.getThumbnailXsoup()).all();
        for (String picBox : picBoxs) {
            Html html = new Html(picBox, page.getUrl().toString());
            String thumbnailImg = html.xpath(picSpiderDO.getThumbnailImgXsoup()).toString();
            String thumbnailUrl = html.links().toString();
            thumbnailImg = ManageImgSrcUtil.manageThumbnail(station, thumbnailImg);
            if (StringUtils.isNotBlank(thumbnailImg) && StringUtils.isNotBlank(thumbnailUrl)) {
                // Skip thumbnails already stored, and images without an absolute URL.
                if (thumbnailDao.get(thumbnailUrl) > 0 || !thumbnailImg.startsWith("http")) {
                    continue;
                }
                ThumbnailDO thumbnailDO = new ThumbnailDO();
                thumbnailDO.setThumbnailUrl(StringUtils.trim(thumbnailUrl));
                thumbnailDO.setThumbnailImg(StringUtils.trim(thumbnailImg));
                thumbnailDao.add(thumbnailDO);
            }
        }
    }

    /**
     * Caps a re-crawl ("second crawl") at 50 000 processed links by stopping the
     * spider. Currently unused — the call site in {@link #manage} is commented out.
     *
     * @param picSpiderDO extraction rules / crawl state for the station
     */
    private void isFirstCraw(PicSpiderDO picSpiderDO) {
        MySpiderMonitor mySpiderMonitor = MySpiderMonitor.instance();
        Map<String, MySpiderStatus> mySpiderStatusMap = mySpiderMonitor.getSpiderStatuses();
        MySpiderStatus mySpiderStatus = null;
        // Keeps the last status in iteration order — presumably only one spider is
        // registered at a time; TODO confirm against MySpiderMonitor usage.
        for (Map.Entry<String, MySpiderStatus> entry : mySpiderStatusMap.entrySet()) {
            mySpiderStatus = entry.getValue();
        }
        // Null guard added: the original NPE'd when no spider status was registered.
        if (mySpiderStatus != null && picSpiderDO.getIsFirstCrawl() == 1) {
            int total = mySpiderStatus.getTotalPageCount();
            int currentPage = mySpiderStatus.getLeftPageCount();
            // On a second crawl, only the first 50 000 links are fetched.
            if (total - currentPage > 50000) {
                mySpiderStatus.getSpider().stop();
            }
        }
    }
}
