package com.blackman.skcrawler.core.crawler.worm;


import com.blackman.skcrawler.core.crawler.api.IDefaultCrawlerCallback;
import com.blackman.skcrawler.core.crawler.po.WormGrabedPage;
import com.blackman.skcrawler.core.crawler.po.WormPendPage;
import com.blackman.skcrawler.core.crawler.vo.CrawlerRspInfo;
import com.blackman.skcrawler.core.http.api.IDefautHttpProxyData;
import com.blackman.skcrawler.core.http.request.HttpRequest;
import com.blackman.skcrawler.core.http.util.HtmlResponseUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpResponse;

import java.util.Date;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

/**
 * Breadth-first (BFS) crawling strategy.
 *
 * <p><b>Principle:</b> links discovered in a freshly downloaded page are
 * appended to the tail of the pending-URL queue. The crawler therefore fetches
 * every page linked from the seed pages first, then picks one of those pages
 * and fetches everything it links to, level by level.
 *
 * @author kl_zjb
 * @date 2018/04/04 14:22
 */
public class BfsCrawler extends DefaultCrawler{

    /** Pause (ms) between polls of the worker pool, to avoid busy-spinning. */
    private static final long POLL_INTERVAL_MS = 100L;

    /**
     * @param defaultCrawerCallback callback invoked with each crawled page
     *                              whose URL matches the configured patterns
     */
    public BfsCrawler(IDefaultCrawlerCallback defaultCrawerCallback){
        this.defaultCrawerCallback = defaultCrawerCallback;
    }

    /**
     * Injects the provider of HTTP proxy data used for outgoing requests.
     */
    public void setDefautHttpProxyData(IDefautHttpProxyData defautHttpProxyData) {
        this.defautHttpProxyData = defautHttpProxyData;
    }

    /**
     * Main crawl loop: starts a fixed-size worker pool and keeps feeding it
     * pending pages. Runs until the calling thread is interrupted, at which
     * point the pool is shut down and the method returns.
     */
    public void execute(){
        super.execute();
        final ExecutorService fixedThreadPool = Executors.newFixedThreadPool(this.getThreadNum());
        final ThreadPoolExecutor pool = (ThreadPoolExecutor) fixedThreadPool;
        while(true){
            if(getGrabUrlList().size() == 0){
                // Refill the in-memory pending queue.
                // NOTE(review): the page returned here is discarded — confirm
                // getThreadGrabUrl() refills the queue as a side effect and no
                // pending page is lost by ignoring the return value.
                getThreadGrabUrl();
            }
            if(pool.getActiveCount() >= getThreadNum()){
                // Pool saturated: pause instead of busy-spinning (the original
                // bare "continue" in a while(true) pegged a CPU core).
                if(!sleepQuietly(POLL_INTERVAL_MS)){
                    fixedThreadPool.shutdown();
                    return;
                }
                continue;
            }
            fixedThreadPool.execute(new Runnable() {
                public void run() {
                    crawlOne(fixedThreadPool);
                }
            });
        }
    }

    /**
     * Fetches and processes a single pending page: skips URLs already grabbed,
     * downloads the content, extracts new candidate links, persists the page
     * state and hands pattern-matching pages to the developer callback.
     * All exceptions are caught so a single bad page never kills a worker.
     *
     * @param fixedThreadPool the worker pool, used only for diagnostics output
     */
    private void crawlOne(ExecutorService fixedThreadPool) {
        WormPendPage wormPendPage = getThreadGrabUrl();
        if(wormPendPage == null){
            return;
        }
        try {
            ThreadPoolExecutor pool = (ThreadPoolExecutor)fixedThreadPool;
            int activeCount = pool.getActiveCount();
            System.out.println("当前活动线程：" + activeCount + " 待抓取队列：" + getGrabUrlList().size() + " 加入新的待抓取："+ newsCachePendPageList.size());
            System.out.println("准备抓取：" + wormPendPage.getPageUrl());
            // Skip pages that were already crawled and persisted.
            WormGrabedPage grabedPage = defaultDataCallBack.selectOneByUrl(wormPendPage.getPageUrl());
            if (grabedPage != null) {
                return;
            }

            HttpRequest request = new HttpRequest();
            HttpResponse httpResponse = request.sendHttpGetRsp(wormPendPage.getPageUrl());
            if(httpResponse == null){
                // Download failed: reset status so the page can be retried.
                wormPendPage.setStatus(WormPendPage.STATUS_NORMAL);
                wormPendPage.setModifyTime(new Date());
                defaultDataCallBack.upPendPage(wormPendPage);
                return;
            }
            String contentBuf = request.getRspContent(httpResponse);
            runGrabHttp(wormPendPage.getPageUrl(), httpResponse, contentBuf);

            // Mark the page as successfully crawled.
            wormPendPage.setModifyTime(new Date());
            wormPendPage.setStatus(WormPendPage.STATUS_WORMS);
            wormPendPage.setWormTime(new Date());
            defaultDataCallBack.upPendPage(wormPendPage);
            addGrabedPageUrl(wormPendPage.getPageUrl());

            // Only pages whose URL matches a configured pattern are handed to
            // the developer. Boolean.TRUE.equals also guards against a null
            // return, which the original unboxing ("!pangrabRsp") would have
            // turned into a NullPointerException.
            if(!Boolean.TRUE.equals(HtmlResponseUtil.panIsPatternUrl(wormPendPage.getPageUrl(), getRegularList()))){
                return;
            }

            // Hand the raw page content to the developer for parsing.
            CrawlerRspInfo crawlerRspInfo = new CrawlerRspInfo();
            crawlerRspInfo.setContent(contentBuf);
            crawlerRspInfo.setUrl(wormPendPage.getPageUrl());
            defaultCrawerCallback.grabResponse(crawlerRspInfo);

        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Sleeps for {@code millis} milliseconds.
     *
     * @return {@code true} when the sleep completed; {@code false} when the
     *         thread was interrupted (the interrupt flag is restored)
     */
    private boolean sleepQuietly(long millis) {
        try {
            Thread.sleep(millis);
            return true;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
        }
    }

    /**
     * Extracts new candidate links from a downloaded page, completes relative
     * URLs against {@code graburl} and appends them to the pending-grab list.
     *
     * @param graburl    URL of the page that was just downloaded
     * @param httpRsp    raw HTTP response; method is a no-op when {@code null}
     * @param contentBuf page body; a null/empty body yields no links (the
     *                   original would have thrown an NPE on {@code null})
     */
    public void runGrabHttp(String graburl, HttpResponse httpRsp, String contentBuf){
        if(httpRsp == null || StringUtils.isEmpty(graburl) || StringUtils.isEmpty(contentBuf)){
            return;
        }
        // Extract the [new links] that match the configured seed rules.
        List<String> newGrabUrlList = HtmlResponseUtil.handleUrl(contentBuf, this.getSeedList());
        // Complete relative links against the current page URL.
        HtmlResponseUtil.newGrabUrlHandle(graburl, newGrabUrlList);
        this.addGrabList(newGrabUrlList, 100);
    }
}
