package com.kongchengji.spider.station.process.processImpl;

import com.kongchengji.spider.station.constant.Constant;
import com.kongchengji.spider.station.process.AnalysisRequesAbstract;
import com.kongchengji.spider.util.*;
import lombok.Setter;
import okhttp3.*;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * 通过访问接口获取数据的爬虫逻辑处理
 */
/**
 * Spider logic that collects NetEase (163) photo data by calling its DWR list API directly
 * instead of parsing rendered list pages.
 */
public class WangYiStationProcess extends AnalysisRequesAbstract {

    /** Marker emitted by the DWR endpoint when a column has no further entries. */
    private static final String EMPTY_RESULT_MARKER =
            "dwr.engine._remoteHandleCallback('526063','0',null);";

    /** Extracts the {@code domainName} value from the DWR response body. Compiled once. */
    private static final Pattern NAME_PATTERN = Pattern.compile("(?<=domainName\\=\").*?(?=\";)");
    /** Extracts the {@code id} value from the DWR response body. Compiled once. */
    private static final Pattern ID_PATTERN = Pattern.compile("(?<=id\\=).*?(?=;)");

    @Setter
    private int sleepTime = 0;
    @Setter
    private Constant.Station station;
    // NOTE(review): this Site is built eagerly with the default sleepTime (0); calling
    // setSleepTime(...) on this processor later does NOT update the already-built Site.
    // Either call setSite(...) with a fully configured Site, or configure sleep on the Site.
    @Setter
    private Site site = Site
            .me()
            .setSleepTime(sleepTime)
            .setRetryTimes(3)
            .setUserAgent("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31");

    @Override
    public Site getSite() {
        return site;
    }

    /**
     * Main crawl loop:
     * 1. Collect the ids of the columns to crawl from the entry page.
     * 2. For each column, page through the DWR list API to obtain entry ids.
     * 3. Build content-page URLs from those ids and hand them to {@link #contentHandler}.
     * 4. Advance the paging offset until the column is exhausted or 10000 entries were seen.
     *
     * @param page the entry page fetched by webmagic
     */
    @Override
    public void process(Page page) {
        // 1. Collect the column ids to crawl.
        List<String> sidList = page.getHtml().xpath("//a[@sid]/@sid | //em[@cid]/@cid").all();
        // Drop -1, which stands for "all columns", to avoid collecting entries twice.
        sidList = sidList.stream().filter(sid -> Integer.parseInt(sid) > 0).collect(Collectors.toList());
        // 2. Walk every column and page through its entries.
        OkHttpClient client = SingleOkhttpClientUtil.getInstance();
        // XPath rules for column name, title and full-size image — loop-invariant, build once.
        String[] rules = {
                "//p[@class=\"m-crumb\"]/a/text()",
                "//h2/text()",
                "//div[@class=\"pic-area\"]//img/@data-lazyload-src"
        };
        for (String sid : sidList) {
            int start = 0;
            int total = 20;
            // At most ten thousand entries per column.
            while (start < 10000) {
                // NOTE(review): start is advanced BEFORE the first request, so offsets 0-19
                // are never fetched for any column — confirm this is intentional.
                start += total;
                String url = combineUrl(sid, String.valueOf(start), String.valueOf(total));
                Request request = buildListRequest(url);
                // try-with-resources guarantees the Response is closed on every path,
                // including the early break below and any thrown exception.
                try (Response response = client.newCall(request).execute()) {
                    String body = response.body().string();
                    // Empty callback => the column has fewer entries than requested; stop paging.
                    if (body.contains(EMPTY_RESULT_MARKER)) {
                        break;
                    }
                    List<String> urlList = listHandler(body, NAME_PATTERN, ID_PATTERN);
                    // Visit the content links and persist the results (shared logic in the
                    // abstract base class).
                    contentHandler(urlList, station, rules);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag so the owning thread/executor can observe it.
                    Thread.currentThread().interrupt();
                    e.printStackTrace();
                } catch (Exception e) {
                    // Deliberate best-effort: swallow IOException/SQLException etc. so one bad
                    // page does not kill the crawler thread; only log the stack trace.
                    e.printStackTrace();
                }
            }
        }
    }

    /**
     * Builds a GET request for the DWR list endpoint with the headers 163 expects
     * (notably the cross-domain Referer).
     *
     * @param url the fully assembled DWR list URL
     * @return the ready-to-execute OkHttp request
     */
    private Request buildListRequest(String url) {
        return new Request.Builder()
                .addHeader("Accept", "*/*")
                .addHeader("Accept-Language", "zh-CN,zh;q=0.9")
                .addHeader("Content-Type", "text/plain")
                .addHeader("Cache-Control", "no-cache")
                .addHeader("Referer", "http://photo.163.com/crossdomain.html?t=20100205")
                .addHeader("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36")
                .url(url)
                .build();
    }

    /**
     * List-page result handler: pairs each {@code domainName} match with the corresponding
     * {@code id} match and assembles the content-page link.
     *
     * @param response the raw DWR response body to scan
     * @param patterns {@code patterns[0]} matches domain names, {@code patterns[1]} matches ids
     * @return the assembled content-page URLs (empty when nothing matched)
     */
    @Override
    public List<String> listHandler(String response, Pattern... patterns) {
        List<String> urlList = new ArrayList<>();
        Matcher nameMatcher = patterns[0].matcher(response);
        Matcher idMatcher = patterns[1].matcher(response);
        while (nameMatcher.find()) {
            // Guard against mismatched name/id counts: group() without a successful find()
            // would throw IllegalStateException.
            if (!idMatcher.find()) {
                break;
            }
            String url = "http://pp.163.com/" + nameMatcher.group() + "/pp/"
                    + idMatcher.group() + ".html";
            urlList.add(url);
        }
        return urlList;
    }

    /**
     * Assembles the DWR list URL from its variable parts.
     *
     * @param subUrl {@code [0]} column id, {@code [1]} paging offset, {@code [2]} page size
     * @return the complete DWR request URL
     */
    @Override
    public String combineUrl(String... subUrl) {
        return "http://photo.163.com/share/dwr/call/plaincall/PictureSetBean.getPictureSetHotListByDirId.dwr?callCount=1&scriptSessionId=%24%7BscriptSessionId%7D187&c0-scriptName=PictureSetBean&c0-methodName=getPictureSetHotListByDirId&c0-id=0&c0-param0=number%3A" + subUrl[0] + "&c0-param1=number%3A" + subUrl[1] + "&c0-param2=number%3A" + subUrl[2] + "&c0-param3=string%3AWeightAll&c0-param4=number%3A2&c0-param5=string%3AShareSet&batchId=526063";
    }

}
