package com.chance.cc.crawler.development.scripts.zhihu;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.*;
import com.chance.cc.crawler.core.downloader.proxy.Proxy;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.record.CrawlerResultRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.meta.core.bean.common.MetaResponse;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainKeys;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.Header;
import org.jsoup.Jsoup;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URLEncoder;
import java.security.NoSuchAlgorithmException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.filter;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.result;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static org.codehaus.groovy.runtime.EncodingGroovyMethods.md5;
/**
 * @author bx
 * @date 2021/4/9 0008 12:11
 */
public class ZHSearchCrawlerScript extends CrawlerCommonScript {

    // One logger per class; SLF4J loggers are thread-safe, so hold it as a class constant.
    private static final Logger logger = LoggerFactory.getLogger(ZHSearchCrawlerScript.class);
    private static final String DOMAIN = "zhihu";
    private static final String SITE = "search";


    /** Search list api template: keyword, offset, lc_idx, time_zone. */
    public static final String searchUrlFormat = "https://www.zhihu.com/api/v4/search_v3?t=general&q=%s&correction=1&offset=%s&limit=20&lc_idx=%s&show_all_topics=0&time_zone=%s"; // a_day a_week
    public static final String searchUrlRegular = "https://www.zhihu.com/api/v4/search_v3\\S*";

    /** Question page template: question id. */
    public static final String questionUrlFormat = "https://www.zhihu.com/question/%s";
    public static final String questionUrlRegular = "https://www.zhihu.com/question/\\S*";

    /** Answer list api template: question id (first page; later pages come from paging.next). */
    public static final String answerURLFormat = "https://www.zhihu.com/api/v4/questions/%s/answers?include=data[*].is_normal,admin_closed_comment,reward_info,is_collapsed,annotation_action,annotation_detail,collapse_reason,is_sticky,collapsed_by,suggest_edit,comment_count,can_comment,content,editable_content,voteup_count,reshipment_settings,comment_permission,created_time,updated_time,review_info,relevant_info,question,excerpt,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp,is_labeled;data[*].mark_infos[*].url;data[*].author.follower_count,badge[*].topics&limit=20&offset=0&sort_by=updated";
    public static final String answerURLRegular = "https://www.zhihu.com/api/v4/questions/\\S*/answers\\S*";

    /** Comment list api templates: article id / answer id. */
    public static final String articleCommentUrlFormat = "https://www.zhihu.com/api/v4/articles/%s/comments?order=reverse&limit=20&offset=0&status=open";
    public static final String articleCommentUrlRegular = "https://www.zhihu.com/api/v4/articles/\\S*/comments\\S*";
    public static final String answerCommentUrlFormat = "https://www.zhihu.com/api/v4/answers/%s/comments?order=reverse&limit=20&offset=0&status=open";
    public static final String answerCommentUrlRegular = "https://www.zhihu.com/api/v4/answers/\\S*/comments\\S*";

    // Question ids already scheduled in the current run, keyed by scheduler type (dedup cache).
    // NOTE(review): plain HashMap mutated from parse callbacks — confirm single-threaded
    // access, otherwise this should be a ConcurrentHashMap.
    private Map<String,Set<String>> questionIds = new HashMap<>();
    // Shared request headers (cookie included); d_c0 rewrites are guarded by lockObj.
    private static final Map<String,String> baseHeaders = new HashMap<>();
    private Downloader downloader; // lazily captured from the first download context
    private final Object lockObj = new Object();

    // Internal x-zse signing service endpoint, and the udid refresh url used by updateDC0().
    private static final String signHostPrefix = "http://192.168.1.210:8899/encrypt/zhihu";
    private static final String updateUrl = "https://www.zhihu.com/udid";

    /**
     * Domain identifier for this crawler script.
     *
     * @return the constant domain key {@code "zhihu"}
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers every URL pattern this script claims; registration order matches
     * the crawl flow (search list, question page, answer list, comment lists).
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {
                searchUrlRegular,
                questionUrlRegular,
                answerURLRegular,
                articleCommentUrlRegular,
                answerCommentUrlRegular
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Captures the shared page downloader from the first download context, then
     * delegates to the default pre-download hook.
     * NOTE(review): the lazy init is unsynchronized — harmless if contexts share
     * one downloader instance, but confirm.
     */
    @Override
    public void beforeDownload(CrawlerRecordContext context) {
        if (downloader == null) {
            downloader = context.getPageDownloader();
        }
        super.beforeDownload(context);
    }

    /**
     * Input gate: only records tagged with site "search" are handled by this script.
     *
     * @param crawlerRequestRecord incoming request record
     * @return true when the record's "site" business tag equals {@link #SITE}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String recordSite = crawlerRequestRecord.tagsCreator().bizTags()
                .getCategoryTag().getKVTagStrVal("site");
        return SITE.equals(recordSite);
    }

    /**
     * Seeds a crawl run: when the meta "keys" support record for this site is present,
     * resets the per-schedule question-id dedup set, refreshes the base headers and the
     * d_c0 cookie, then builds one search request per configured keyword.
     *
     * @param requestRecord        the scheduling record (carries scheduler_type and proxy config)
     * @param supportSourceRecords meta support records; falls back to the default
     *                             preparation when absent
     * @return the keyword request records to enqueue
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> allItemRecords = new ArrayList<>();

        if (supportSourceRecords == null || supportSourceRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }

        String keysUrlSuffix = "/v1/meta/" + domain() + "/keys?site=" + SITE;
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            if (!supportSourceRecord.getHttpRequest().getUrl().endsWith(keysUrlSuffix)) {
                continue;
            }
            String schedulerType = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("scheduler_type");
            if (StringUtils.isBlank(schedulerType)) {
                logger.error("zhi hu search job has to config scheduler type！");
            }
            // Reset the dedup set for this schedule run (create it on first use).
            questionIds.computeIfAbsent(schedulerType, k -> new HashSet<>()).clear();

            try {
                initBaseHeaders(); // headers first
                updateDC0(requestRecord.getHttpConfig().getProxy()); // then the d_c0 cookie
                // cookies/userAgents must be ready before the keyword records are built
                initAllCrawlerRecordByKeyword(requestRecord, supportSourceRecord, allItemRecords);
            } catch (Exception e) {
                // Keep the stack trace; message-only logging hid the failure cause.
                logger.error("init keyword record error : {}", e.getMessage(), e);
            }
        }

        return allItemRecords;
    }


    /**
     * Routes a downloaded page to the matching link parser by url pattern.
     * A failed download is re-queued for retry and skipped for washing.
     *
     * @param context download context carrying the page and its request record
     * @return follow-up request records (next pages, item pages, comment lists)
     */
    public List<CrawlerRequestRecord> parseLinks(CrawlerRecordContext context) {
        HttpPage httpPage = context.getPage();
        CrawlerRequestRecord crawlerRequestRecord = context.getCrawlerRecord();
        List<CrawlerRequestRecord> crawlerRequestRecords = new ArrayList<>();

        // Lazily capture the downloader (used by updateDC0 on verification failures).
        if (downloader == null) {
            downloader = context.getPageDownloader();
        }

        if (!httpPage.isDownloadSuccess()) {
            logger.error("zhihu search proxy has error ,will retry");
            addCrawlerRecords(crawlerRequestRecords, crawlerRequestRecord, httpPage);
            crawlerRequestRecord.setNeedWashPage(false); // download failed, nothing to wash
            return crawlerRequestRecords;
        }

        String url = httpPage.getRequest().getUrl();
        if (isUrlMatch(url, searchUrlRegular)) {
            parseListLinks(crawlerRequestRecords, crawlerRequestRecord, httpPage);
        } else if (isUrlMatch(url, questionUrlRegular)) {
            parseQuestionLinks(crawlerRequestRecords, crawlerRequestRecord, httpPage);
        } else if (isUrlMatch(url, answerURLRegular)) {
            parseAnswerLinks(crawlerRequestRecords, crawlerRequestRecord, httpPage);
        } else if (isUrlMatch(url, answerCommentUrlRegular)
                // BUG FIX: was articleCommentUrlFormat — a format string ("%s", literal '?'),
                // not a regex, so article comment pages never matched this branch.
                || isUrlMatch(url, articleCommentUrlRegular)) {
            parseCommentLinks(crawlerRequestRecords, crawlerRequestRecord, httpPage);
        }
        return crawlerRequestRecords;
    }

    /**
     * Splits a url's query string into a name/value map.
     * Values are returned as-is (still url-encoded); duplicate names keep the last value.
     *
     * @param url e.g. http://*.*.com?aa=11&bb=22&cc=33
     * @return parameter map, or {@code null} when there is no query string —
     *         callers rely on the null contract, do not change it to an empty map
     */
    private Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<String, Object>(0);
        String param = url;
        int queryStart = url.indexOf('?');
        if (queryStart >= 0) {
            // BUG FIX: split("\\?")[1] threw ArrayIndexOutOfBounds on a trailing '?'
            // and truncated queries containing a second '?'.
            param = url.substring(queryStart + 1);
        }
        if (StringUtils.isBlank(param)) {
            return null;
        }
        for (String pair : param.split("&")) {
            // BUG FIX: limit the split to 2 so values containing '=' are kept
            // intact instead of the whole parameter being dropped.
            String[] p = pair.split("=", 2);
            if (p.length == 2) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

    /**
     * Re-fetches the udid endpoint and splices the fresh {@code d_c0} cookie returned in
     * the set-cookie response header into the shared base headers. Invoked when a list
     * download hits the anti-bot verification page.
     *
     * @param proxy proxy to tunnel the refresh request through
     */
    private void updateDC0(Proxy proxy) {
        HttpRequest httpRequest = new HttpRequest();
        httpRequest.setUrl(updateUrl);
        httpRequest.setMethod(HttpConstant.Method.POST);
        HttpConfig httpConfig = HttpConfig.me("dc0");
        httpConfig.setDisableCookie(true);
        httpConfig.setUseProxy(true);
        httpConfig.setProxy(proxy);
        if (httpConfig.getCookieStore() != null) {
            httpConfig.getCookieStore().clear();
        }
        try {
            HttpPage httpPage;
            try {
                httpPage = downloader.download(httpRequest, httpConfig);
            } catch (Exception e) {
                // BUG FIX: the retry's result used to be discarded, leaving httpPage
                // null and guaranteeing an NPE below.
                httpPage = downloader.download(httpRequest, httpConfig);
            }
            if (StringUtils.isNotBlank(httpPage.getRawText())) {
                synchronized (lockObj) {
                    String cookie = baseHeaders.get("cookie");
                    // Locate the current d_c0 fragment inside the shared cookie string.
                    Matcher mtDC0 = Pattern.compile("d_c0=\"\\S*=\\|\\d*\"").matcher(cookie);
                    String dc0 = "";
                    while (mtDC0.find()) {
                        dc0 = mtDC0.group(0); // keep the last match
                    }
                    String setDC0 = "";
                    Header[] responseHeaders = httpPage.getResponseHeaders();
                    for (Header responseHeader : responseHeaders) {
                        if ("set-cookie".equals(responseHeader.getName())) {
                            setDC0 = responseHeader.getValue();
                            break;
                        }
                    }
                    if (StringUtils.isNotBlank(dc0) && StringUtils.isNotBlank(setDC0)) {
                        //d_c0="ADBdXIpwHhOPTgWa0UcVkbYiDq9T02HXQ4I=|1621240282";
                        String newDC0 = setDC0.split(";")[0]; // drop cookie attributes
                        String newCookie = cookie.replace(dc0, newDC0);
                        baseHeaders.put("cookie", newCookie);
                        logger.info("zhihu cookie-dc0 has chanced from [{}] to [{}]", dc0, newDC0);
                    }
                }
            } else {
                boolean disableCookie = httpConfig.isDisableCookie();
                logger.error("zhihu download update 'dc0' page empty , cookie not change : [{}]; disableCookie : {}", baseHeaders.get("cookie"), disableCookie);
            }
        } catch (Exception e) {
            // Keep the stack trace; bare e.getMessage() hid the failure cause.
            logger.error("zhihu update dc0 error : {}", e.getMessage(), e);
        }
    }

    /**
     * Parses one page of the search_v3 list response: schedules the next list page while
     * {@code paging.is_end} is false, then fans out follow-up requests per result type —
     * answer → question page, question → first answer list, article → comment list.
     * A parse failure re-queues the page for retry and stops.
     */
    private void parseListLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        Json dataJson = new Json(httpPage.getRawText());
        String isEnd = null;
        List<String> dataItems = null;
        try {
            isEnd = dataJson.jsonPath($_type + ".paging.is_end").get();
            dataItems = dataJson.jsonPath($_type + ".data").all();
        } catch (Exception e) {
            logger.error("zhi hu search list url {} download error:{},page rawtext {},will retry!",
                    httpPage.getRequest().getUrl(),e.getMessage(),httpPage.getRawText());
            logger.error(baseHeaders.get("cookie"));
            // The verification page means the d_c0 cookie went stale — refresh it.
            if (StringUtils.isNotBlank(httpPage.getRawText()) && httpPage.getRawText().contains("系统监测到您的网络环境存在异常")){
                updateDC0(crawlerRequestRecord.getHttpConfig().getProxy());
            }
            addCrawlerRecords(crawlerRequestRecords, crawlerRequestRecord, httpPage);
            crawlerRequestRecord.setNeedWashPage(false);
            return; // nothing was parsed; the retry record is already queued
        }
        if ("false".equals(isEnd)){
            String nextPageListUrl = dataJson.jsonPath($_type+".paging.next").get();
            int size = dataItems.size();
            Map<String, Object> urlParams = getUrlParams(crawlerRequestRecord.getHttpRequest().getUrl());
            if (null != urlParams){
                // Rebuild the next-page url ourselves: offset and lc_idx advance by the page size.
                String q = (String) urlParams.get("q");
                String time_zone = (String) urlParams.get("time_zone");
                int offset = Integer.parseInt((String) urlParams.get("offset"));
                int lc_idx = Integer.parseInt((String) urlParams.get("lc_idx"));
                nextPageListUrl = String.format(searchUrlFormat,q,(offset + size),(lc_idx + size),time_zone);
            }
            CrawlerRequestRecord crawlerTurnPaeRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageListUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .needWashed(true)
                    .build();
            HttpRequest httpRequest = crawlerTurnPaeRequestRecord.getHttpRequest();
            Map<String, String> headers = copyMap(baseHeaders);
            try {
                genSign86Headers(nextPageListUrl,headers); // sign the api url into the headers
                httpRequest.setHeaders(headers);
            } catch (Exception e) {
                logger.error("zhihu gen sign headers error : {}",e.getMessage(),e);
            }
            crawlerRequestRecords.add(crawlerTurnPaeRequestRecord);
        }

        if (dataItems != null && dataItems.size() > 0){
            for (String dataItem : dataItems) {
                try {
                    Json dataItemJson = new Json(dataItem);
                    String type  = dataItemJson.jsonPath($_type+".object.type").get();
                    if ("answer".equals(type)){
                        String questionId = dataItemJson.jsonPath($_type + ".object.question.id").get();
                        String schedulerType = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("scheduler_type");
                        // BUG FIX: create the per-schedule set on demand; questionIds.get()
                        // could return null and NPE on contains().
                        Set<String> questionIdSet = questionIds.computeIfAbsent(schedulerType, k -> new HashSet<>());
                        if(!questionIdSet.contains(questionId)){ // question not yet scheduled this run
                            long releaseTime = Long.parseLong(dataItemJson.jsonPath($_type + ".object.updated_time").get());
                            questionIdSet.add(questionId);
                            CrawlerRequestRecord crawlerItemRequestRecord = CrawlerRequestRecord.builder() // question page request
                                    .itemPageRequest(crawlerRequestRecord)
                                    .httpUrl(String.format(questionUrlFormat,questionId))
                                    .releaseTime(releaseTime*1000) // api returns seconds
                                    .copyBizTags()
                                    .notFilterRecord()
                                    .build();
                            crawlerRequestRecords.add(crawlerItemRequestRecord);
                        }

                    } else if ("question".equals(type)){
                        String questionId = dataItemJson.jsonPath($_type + ".object.id").get();
                        String schedulerType = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("scheduler_type");
                        Set<String> questionIdSet = questionIds.computeIfAbsent(schedulerType, k -> new HashSet<>());
                        if(!questionIdSet.contains(questionId)){ // question not yet scheduled this run
                            questionIdSet.add(questionId);
                            String answerCount = dataItemJson.jsonPath($_type + ".object.answer_count").get();
                            if (StringUtils.isNotBlank(answerCount) && !"0".equals(answerCount)){ // build answer-list request

                                CrawlerRequestRecord crawlerItemRequestRecord = genAnswerFirstCrawlerRecord(
                                        crawlerRequestRecords,crawlerRequestRecord,httpPage,String.format(answerURLFormat,questionId));

                                // BUG FIX: the record is queued inside genAnswerFirstCrawlerRecord;
                                // it was previously added a second time here. Also guard its
                                // null return (missing filter-info tag).
                                if (crawlerItemRequestRecord != null) {
                                    String follower_count = dataItemJson.jsonPath($_type + ".object.follower_count").get();
                                    String visits_count = dataItemJson.jsonPath($_type + ".object.visits_count").get();
                                    crawlerItemRequestRecord.tagsCreator().bizTags().addCustomKV("follower_count",follower_count);
                                    crawlerItemRequestRecord.tagsCreator().bizTags().addCustomKV("visits_count",visits_count);
                                }
                            }
                        }
                    } else if ("article".equals(type)){
                        String articleId = dataItemJson.jsonPath($_type + ".object.id").get();
                        String commentCount = dataItemJson.jsonPath($_type + ".object.comment_count").get();
                        if(!"0".equals(commentCount)){
                            CrawlerRequestRecord crawlerTurnPageRequestRecord = CrawlerRequestRecord.builder() // comment-list request
                                    .turnPageRequest(crawlerRequestRecord)
                                    .httpUrl(String.format(articleCommentUrlFormat,articleId))
                                    .releaseTime(System.currentTimeMillis())
                                    .copyBizTags()
                                    .build();
                            crawlerTurnPageRequestRecord.tagsCreator().bizTags().addCustomKV("parentId",articleId);
                            crawlerRequestRecords.add(crawlerTurnPageRequestRecord);
                        }
                    } else{
                        logger.error("zhi hu search unknown item type {}",type);
                    }

                } catch (Exception e) {
                    logger.error("zhi hu search parse article item error:{}",e.getMessage(),e);
                }
            }
        }
    }

    /**
     * Parses a question page: extracts the visit counter from the NumberBoard block and
     * queues the first answer-list request for the question. A failure re-queues the
     * page for retry.
     */
    private void parseQuestionLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {
            // The second NumberBoard value on the page is the visit count.
            String views = httpPage.getHtml().xpath("//div[@class=\"NumberBoard-item\"]//strong[@class=\"NumberBoard-itemValue\"]/@title").all().get(1);
            String[] questionUrlItems = httpPage.getRequest().getUrl().split("/",-1);
            String questionId = questionUrlItems[questionUrlItems.length-1]; // last path segment
            String answerListUrl = String.format(answerURLFormat,questionId);

            CrawlerRequestRecord crawlerItemRequestRecord = genAnswerFirstCrawlerRecord(
                    crawlerRequestRecords,crawlerRequestRecord,httpPage,answerListUrl);
            // BUG FIX: genAnswerFirstCrawlerRecord returns null when the filter-info tag
            // is missing; dereferencing it used to NPE into the retry branch below,
            // causing pointless re-downloads.
            if (crawlerItemRequestRecord != null) {
                crawlerItemRequestRecord.tagsCreator().bizTags().addCustomKV("visits_count",views);
            }

        } catch (Exception e) {
            logger.error("zhi hu search parse question {} error:{},will retry",httpPage.getRequest().getUrl(),e.getMessage());
            addCrawlerRecords(crawlerRequestRecords, crawlerRequestRecord, httpPage);
            crawlerRequestRecord.setNeedWashPage(false);
        }
    }

    /**
     * Builds, queues (into {@code crawlerRequestRecords}) and returns the first
     * answer-list request for a question, copying the result-filter configuration
     * carried in the "article_record_filter_info" business tag.
     *
     * @return the queued record, or {@code null} when the filter-info tag is missing —
     *         callers must null-check the result
     */
    private CrawlerRequestRecord genAnswerFirstCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRequestRecord,
                                             HttpPage httpPage,String answerListUrl){
        if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("article_record_filter_info")) {
            logger.error("zhi hu answer crawler need to filter information!");
            return null;
        }

        // The tag value is a JSON-serialized CrawlerRecord carrying the filter settings.
        KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("article_record_filter_info");
        CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);

        CrawlerRequestRecord crawlerTurnPageRequestRecord = CrawlerRequestRecord.builder() // first answer-list (turn-page) request
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(answerListUrl)
//                .httpHeads(httpPage.getRequest().getHeaders())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .notFilterRecord()
                .build();
        // Carry the deserialized filter settings onto the new record.
        crawlerTurnPageRequestRecord.setFilter(filterInfoRecord.getFilter());
        crawlerTurnPageRequestRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
        crawlerTurnPageRequestRecord.setTurnPageFilterInfo(filterInfoRecord.getTurnPageFilterInfo());
        crawlerRequestRecords.add(crawlerTurnPageRequestRecord);
        return crawlerTurnPageRequestRecord;
    }

    /**
     * Parses an answer-list api response: queues a comment-list request for every answer
     * with a non-zero comment count, then queues the next answer page while
     * {@code paging.is_end} is false.
     * NOTE(review): a missing "comment_record_filter_info" tag returns from the whole
     * method, skipping the paging step too — confirm this early abort is intended.
     */
    private void parseAnswerLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {
            Json resultJson = new Json(httpPage.getRawText());
            List<String> dataItmes = resultJson.jsonPath($_type + ".data").all();
            if (dataItmes != null && dataItmes.size() > 0){
                for (String dataItme : dataItmes) {
                    Json dataItemJson = new Json(dataItme);
                    String commentCount = dataItemJson.jsonPath($_type + ".comment_count").get();
                    String answerId = dataItemJson.jsonPath($_type + ".id").get();
                    if (!"0".equals(commentCount)){ // build the comment-list request

                        if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
                            logger.error("zhi hu comment crawler need to filter information!");
                            return;
                        }

                        // The tag value is a JSON-serialized CrawlerRecord carrying filter settings.
                        KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                        CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
                        CrawlerRequestRecord crawlerTurnPageRequestRecord = CrawlerRequestRecord.builder() // comment-list request
                                .turnPageRequest(crawlerRequestRecord)
                                .httpUrl(String.format(answerCommentUrlFormat,answerId))
                                .httpHeads(httpPage.getRequest().getHeaders())
                                .releaseTime(System.currentTimeMillis())
                                .copyBizTags()
                                .notFilterRecord()
                                .needWashed(true)
                                .build();
                        crawlerTurnPageRequestRecord.setFilter(filterInfoRecord.getFilter());
                        crawlerTurnPageRequestRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                        crawlerTurnPageRequestRecord.setTurnPageFilterInfo(filterInfoRecord.getTurnPageFilterInfo());
                        crawlerTurnPageRequestRecord.tagsCreator().bizTags().addCustomKV("parentId",answerId);
                        crawlerRequestRecords.add(crawlerTurnPageRequestRecord);
                    }
                }
            }

            String isEnd = resultJson.jsonPath($_type + ".paging.is_end").get();
            if ("false".equals(isEnd)){
                String next = resultJson.jsonPath($_type + ".paging.next").get();
                CrawlerRequestRecord crawlerTurnPageRequestRecord = CrawlerRequestRecord.builder() // next answer page
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(next)
                        .httpHeads(httpPage.getRequest().getHeaders())
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .needWashed(true)
                        .build();
                crawlerRequestRecords.add(crawlerTurnPageRequestRecord);
            }

        } catch (Exception e) {
            logger.error("zhi hu search parse answer {} error:{},will retry",httpPage.getRequest().getUrl(),e.getMessage());
            addCrawlerRecords(crawlerRequestRecords, crawlerRequestRecord, httpPage);
            crawlerRequestRecord.setNeedWashPage(false);
        }
    }

    /**
     * Parses a comment-list api response and queues the next page while paging reports
     * more data. Closed or missing comment threads are skipped without washing; a parse
     * failure re-queues the page for retry.
     */
    private void parseCommentLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {
            String rawText = httpPage.getRawText();
            Json commentJson = new Json(rawText);

            boolean closedOrMissing = rawText.contains("\"message\":\"评论已关闭\"")
                    || rawText.contains("\"message\":\"资源不存在\"");
            if (closedOrMissing) {
                logger.warn("zhi hu comment url {} is error: {}!",httpPage.getRequest().getUrl(),rawText);
                crawlerRequestRecord.setNeedWashPage(false);
                return;
            }

            String isEnd = commentJson.jsonPath($_type + ".paging.is_end").get();
            if ("false".equals(isEnd)) {
                String nextUrl = commentJson.jsonPath($_type + ".paging.next").get();
                CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder() // next comment page
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(nextUrl)
                        .httpHeads(httpPage.getRequest().getHeaders())
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .needWashed(true)
                        .build();
                crawlerRequestRecords.add(nextPageRecord);
            }

        } catch (Exception e) {
            logger.error("zhi hu search parse comment {} error:{},will retry",httpPage.getRequest().getUrl(),e.getMessage());
            addCrawlerRecords(crawlerRequestRecords, crawlerRequestRecord, httpPage);
            crawlerRequestRecord.setNeedWashPage(false);
        }
    }

    /**
     * Intentional no-op: this script performs no post-processing of internal downloads.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {

    }

    /**
     * Dispatches a downloaded page to the matching wash routine by url pattern.
     * Question pages produce no data directly and fall through with an empty result.
     *
     * @return the washed data records (possibly empty, never null)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();

        String url = page.getRequest().getUrl();
        if (isUrlMatch(url,searchUrlRegular)){
            washListPage(crawlerDataList,crawlerRecord,page);
        } else if (isUrlMatch(url,answerURLRegular)){
            washAnswerLinks(crawlerDataList,crawlerRecord,page);
        } else if (isUrlMatch(url,answerCommentUrlRegular)
                // BUG FIX: was articleCommentUrlFormat — a format string ("%s", literal '?'),
                // not a regex, so article comment pages were never washed.
                || isUrlMatch(url,articleCommentUrlRegular)){
            washCommentLinks(crawlerDataList,crawlerRecord,page);
        }
        return crawlerDataList;
    }

    /**
     * Washes a search list page into result data. Only "article" items are materialized
     * here (answers are washed from the answer-list pages); each article emits one
     * content record and one interaction record.
     *
     * @return the same {@code crawlerDataList}, with the washed records appended
     */
    private List<CrawlerData> washListPage(List<CrawlerData> crawlerDataList,CrawlerRequestRecord crawlerRecord, HttpPage page) {
        Json dataJson = new Json(page.getRawText());
        List<String> dataItems = null;
        try {
            dataItems = dataJson.jsonPath($_type + ".data").all();
        } catch (Exception e) {
            // BUG FIX: was printStackTrace() followed by a fall-through into an NPE
            // when iterating the null list below.
            logger.error("zhi hu search wash list page {} parse error:{}",page.getRequest().getUrl(),e.getMessage(),e);
        }
        if (dataItems == null) {
            return crawlerDataList;
        }

        for (String dataItem : dataItems) {
            try {
                Json dataItemJson = new Json(dataItem);
                String type  = dataItemJson.jsonPath($_type+".object.type").get();
                if ("article".equals(type)){
                    String articleId = dataItemJson.jsonPath($_type + ".object.id").get();

                    String content = dataItemJson.jsonPath($_type + ".object.content").get();
                    Html contentHtml = Html.create(content);
                    List<String> imgUrls = contentHtml.xpath("//img/@src").all();

                    // Content record for the article itself.
                    CrawlerData crawlerArticleData = CrawlerData.builder()
                            .data(crawlerRecord, page)
                            .url("https://zhuanlan.zhihu.com/p/"+articleId)
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE,articleId))
                            .releaseTime(Long.valueOf(dataItemJson.jsonPath($_type + ".object.updated_time").get())*1000)
                            .addContentKV(Field_Author, dataItemJson.jsonPath($_type + ".object.author.name").get())
                            .addContentKV(Field_Author_Id, dataItemJson.jsonPath($_type + ".object.author.id").get())
                            .addContentKV(Field_Title, Jsoup.parse(dataItemJson.jsonPath($_type + ".object.title").get()).text())
                            .addContentKV(Field_Content,Jsoup.parse(content).text())
                            .addContentKV(Field_Images, JSON.toJSONString(imgUrls))
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                            .requestLabelTag(result)
                            .requestLabelTag(filter)
                            .copyBizTags()
                            .build();
                    crawlerDataList.add(crawlerArticleData);

                    // Interaction record (comment/like counters), parented to the article.
                    CrawlerData crawlerInteractionData = CrawlerData.builder()
                            .data(crawlerRecord, page)
                            .url("https://zhuanlan.zhihu.com/p/"+articleId)
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, interaction.enumVal(), articleId))
                            .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE,articleId))
                            .releaseTime(Long.valueOf(dataItemJson.jsonPath($_type + ".object.updated_time").get())*1000)
                            .addContentKV(Field_I_Comments, dataItemJson.jsonPath($_type + ".object.comment_count").get())
                            .addContentKV(Field_I_Likes, dataItemJson.jsonPath($_type + ".object.voteup_count").get())
                            .resultLabelTag(interaction)
                            .requestLabelTag(result)
                            .requestLabelTag(filter)
                            .copyBizTags()
                            .build();
                    crawlerDataList.add(crawlerInteractionData);
                }

            } catch (Exception e) {
                logger.error("zhi hu search wash article type item error:{}",e.getMessage(),e);
            }
        }
        return crawlerDataList;
    }

    /**
     * Washes answer-type search items into one article record plus one
     * interaction record (comments/likes/views) per answer.
     *
     * @param crawlerDataList accumulator the new records are appended to
     * @param crawlerRecord   the request record that produced {@code page}
     * @param page            downloaded search-result response (JSON)
     * @return the same {@code crawlerDataList}, with the new records appended
     */
    private List<CrawlerData> washAnswerLinks(List<CrawlerData> crawlerDataList,CrawlerRequestRecord crawlerRecord, HttpPage page) {
        Json resultJson = new Json(page.getRawText());
        List<String> dataItems = resultJson.jsonPath($_type + ".data").all();
        if (dataItems != null && dataItems.size() > 0){
            for (String dataItem : dataItems) {
                try {
                    Json dataItemJson = new Json(dataItem);
                    String questionId = dataItemJson.jsonPath($_type + ".question.id").get();
                    String answerId = dataItemJson.jsonPath($_type + ".id").get();
                    // both records point at the canonical answer URL
                    String answerUrl = "https://www.zhihu.com/question/" + questionId + "/answer/" + answerId;

                    String content = dataItemJson.jsonPath($_type + ".content").get();
                    Html contentHtml = Html.create(content);
                    // lazily-loaded images live inside <noscript> wrappers in the answer HTML
                    List<String> imgUrls = contentHtml.xpath("//noscript/img/@src").all();

                    // updated_time is in epoch seconds; convert to milliseconds once and reuse
                    long releaseTime = Long.parseLong(dataItemJson.jsonPath($_type + ".updated_time").get()) * 1000L;

                    CrawlerData crawlerArticleData = CrawlerData.builder()
                            .data(crawlerRecord, page)
                            .url(answerUrl)
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, answerId))
                            .releaseTime(releaseTime)
                            .addContentKV(Field_Author, dataItemJson.jsonPath($_type + ".author.name").get())
                            .addContentKV(Field_Author_Id, dataItemJson.jsonPath($_type + ".author.id").get())
                            .addContentKV(Field_Title, Jsoup.parse(dataItemJson.jsonPath($_type + ".question.title").get()).text())
                            .addContentKV(Field_Content, Jsoup.parse(content).text())
                            .addContentKV(Field_Images, JSON.toJSONString(imgUrls))
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                            .requestLabelTag(result)
                            .requestLabelTag(filter)
                            .copyBizTags()
                            .build();
                    crawlerDataList.add(crawlerArticleData);

                    // visits_count was stashed on the request's biz tags upstream
                    String visits_count = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("visits_count");

                    CrawlerData crawlerInteractionData = CrawlerData.builder()
                            .data(crawlerRecord, page)
                            .url(answerUrl)
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, interaction.enumVal(), answerId))
                            .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, answerId))
                            .releaseTime(releaseTime)
                            .addContentKV(Field_I_Comments, dataItemJson.jsonPath($_type + ".comment_count").get())
                            .addContentKV(Field_I_Likes, dataItemJson.jsonPath($_type + ".voteup_count").get())
                            .addContentKV(Field_I_Views, visits_count)
                            .resultLabelTag(interaction)
                            .requestLabelTag(result)
                            .requestLabelTag(filter)
                            .copyBizTags()
                            .build();
                    crawlerDataList.add(crawlerInteractionData);
                } catch (Exception e) {
                    // pass the throwable itself so the stack trace is not lost
                    logger.error("zhi hu search wash answer type item error: {}", e.getMessage(), e);
                }
            }
        }
        return crawlerDataList;
    }

    /**
     * Washes comment-type items into one comment record plus one interaction
     * record (vote count) per comment.
     *
     * @param crawlerDataList accumulator the new records are appended to
     * @param crawlerRecord   the request record that produced {@code page}
     * @param page            downloaded comment-list response (JSON)
     * @return the same {@code crawlerDataList}, with the new records appended
     */
    private List<CrawlerData> washCommentLinks(List<CrawlerData> crawlerDataList,CrawlerRequestRecord crawlerRecord, HttpPage page) {
        Json resultJson = new Json(page.getRawText());
        List<String> dataItems = resultJson.jsonPath($_type + ".data").all();
        if (dataItems != null && dataItems.size() > 0){
            for (String dataItem : dataItems) {
                try {
                    Json dataItemJson = new Json(dataItem);
                    // the id of the answer/article this comment belongs to travels on the request's biz tags
                    String parentId = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("parentId");
                    String commentId = dataItemJson.jsonPath($_type + ".id").get();

                    // created_time is in epoch seconds; convert to milliseconds once and reuse
                    long releaseTime = Long.parseLong(dataItemJson.jsonPath($_type + ".created_time").get()) * 1000L;

                    CrawlerData crawlerCommentData = CrawlerData.builder()
                            .data(crawlerRecord, page)
                            .url("")
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, parentId, commentId))
                            .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, parentId))
                            .releaseTime(releaseTime)
                            .addContentKV(Field_Author, dataItemJson.jsonPath($_type + ".author.member.name").get())
                            .addContentKV(Field_Author_Id, dataItemJson.jsonPath($_type + ".author.member.id").get())
                            .addContentKV(Field_Content, Jsoup.parse(dataItemJson.jsonPath($_type + ".content").get()).text())
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                            .copyBizTags()
                            .requestLabelTag(result)
                            .requestLabelTag(filter)
                            .build();
                    crawlerDataList.add(crawlerCommentData);

                    CrawlerData crawlerInteractionData = CrawlerData.builder()
                            .data(crawlerRecord, page)
                            .url("")
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, interaction.enumVal(), parentId, commentId))
                            .parentId(crawlerCommentData.getDataId())
                            .releaseTime(releaseTime)
                            .addContentKV(Field_I_Likes, dataItemJson.jsonPath($_type + ".vote_count").get())
                            .resultLabelTag(interaction)
                            .copyBizTags()
                            .requestLabelTag(result)
                            .requestLabelTag(filter)
                            .build();
                    crawlerDataList.add(crawlerInteractionData);
                } catch (Exception e) {
                    // pass the throwable itself so the stack trace is not lost
                    logger.error("zhi hu search wash comment type item error: {}", e.getMessage(), e);
                }
            }
        }
        return crawlerDataList;
    }

    /**
     * Post-execution hook inherited from {@code CrawlerCommonScript}.
     * Deliberately empty: this script performs no cleanup or bookkeeping
     * after a crawl round.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Populates {@code baseHeaders} with the static headers every zhihu search
     * request sends: a browser fingerprint (user-agent, sec-* headers), the API
     * version, the anti-bot markers x-zse-93 and the d_c0 cookie that are later
     * consumed by getDC0/genSign86Headers when computing x-zse-96.
     */
    private void initBaseHeaders() {
        baseHeaders.put("authority","www.zhihu.com");
        // NOTE(review): previously-sent A/B-test headers, kept commented out for reference.
//        baseHeaders.put("x-ab-param","tp_topic_style=0;zr_expslotpaid=1;pf_adjust=0;tp_contents=2;pf_noti_entry_num=0;tp_dingyue_video=0;tp_zrec=0;qap_question_visitor= 0;zr_slotpaidexp=1;top_test_4_liguangyi=1;qap_question_author=0;se_ffzx_jushen1=0");
//        baseHeaders.put("x-ab-pb","CnhAATQM2gJWDLQKmwu0ALcAiAHBArULaQE7Ap8CTwG7AtgC5AoKAsoC9AsqAn0CiQLMAgELQwA3DOALPwCmAbkCxwLsCkcA1wJSC3QBhAKxAo0BGwDcC4kMawFFAr8C1wsHDMAC2QLPC8ICDwtnAFgBagGMAmALbQISPAAAAAEAAgAAAAADAAAAAAAAAAAAAAAAAAAAGAEAAAALAAEAAAEAAAAAAAAAAAAAAAAAAAsAAAAAAAAAAA==");
        baseHeaders.put("x-api-version","3.0.91");
        baseHeaders.put("sec-ch-ua-mobile","?0");
        baseHeaders.put("user-agent","Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36");
        baseHeaders.put("x-requested-with","fetch");
        baseHeaders.put("x-app-za","OS=Web");
        // x-zse-93 is part of the string that gets signed in genSign86Headers
        baseHeaders.put("x-zse-93","101_3_2.0");
        baseHeaders.put("sec-ch-ua","\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"90\", \"Google Chrome\";v=\"90\"");
        baseHeaders.put("accept","*/*");
        baseHeaders.put("sec-fetch-site","same-origin");
        baseHeaders.put("sec-fetch-mode","cors");
        baseHeaders.put("sec-fetch-dest","empty");
        baseHeaders.put("accept-language","zh-CN,zh;q=0.9");
        // seed d_c0 cookie; getDC0 extracts its value for the signature source
        baseHeaders.put("cookie","d_c0=\"AKBdqRjJOxOPTvi9TukxkaNyBpX2K5DMHNc=|1623209653\";");
    }

    /**
     * Extracts the value of the {@code d_c0} cookie from a raw Cookie header
     * string (e.g. {@code d_c0="AKBd...=|162..."; other=x}).
     *
     * @param cookie raw Cookie header value, segments separated by {@code ';'}
     * @return the d_c0 value (last occurrence wins), or "" if not present
     */
    private String getDC0(String cookie){
        String dc0 = "";
        for (String segment : cookie.split(";")) {
            String trimmed = segment.trim();
            int eq = trimmed.indexOf('=');
            // segments without '=' carry no value and are skipped
            if (eq < 0) {
                continue;
            }
            if (trimmed.substring(0, eq).equalsIgnoreCase("d_c0")) {
                // substring after the first '=' instead of split("d_c0="):
                // the old split threw ArrayIndexOutOfBoundsException for an
                // empty value ("d_c0=") or a differently-cased key ("D_C0"),
                // which the equalsIgnoreCase match explicitly accepts.
                dc0 = trimmed.substring(eq + 1);
            }
        }
        return dc0;
    }

    /**
     * Strips the scheme and host from an absolute http(s) URL, returning the
     * path-plus-query part (the "api" portion that gets signed).
     *
     * @param url absolute URL, e.g. {@code https://www.zhihu.com/api/v4/search?...}
     * @return everything from the first '/' after the host (e.g. {@code /api/v4/search?...}),
     *         or "" if the URL is not absolute or has no path
     */
    private String removeOrigin(String url){
        String api = "";
        if (url.startsWith("http")) {
            int schemeEnd = url.indexOf("://");
            if (schemeEnd >= 0) {
                // first '/' after the authority marks the start of the path.
                // The old split("com")[1] broke whenever "com" occurred more
                // than once (e.g. any /comments endpoint truncated the path).
                int pathStart = url.indexOf('/', schemeEnd + 3);
                if (pathStart >= 0) {
                    api = url.substring(pathStart);
                }
            }
        }
        return api;
    }

    /**
     * For every keyword delivered by the support-source download, builds one
     * signed first-page search request record and appends it to
     * {@code allItemRecords}.
     *
     * @param requestRecord       the originating request; supplies biz tags,
     *                            headers and the time_zone tag
     * @param supportSourceRecord record whose internal-download page contains
     *                            the keyword list (a MetaResponse wrapping
     *                            serialized CrawlerDomainKeys entries)
     * @param allItemRecords      output list the new request records are added to
     */
    private void initAllCrawlerRecordByKeyword(CrawlerRequestRecord requestRecord,
                                               CrawlerRequestRecord supportSourceRecord,
                                               List<CrawlerRecord> allItemRecords){
        try {
            HttpPage httpPage = supportSourceRecord.getInternalDownloadPage();
            MetaResponse metaResponse = JSON.parseObject(httpPage.getRawText(), MetaResponse.class);
            // NOTE(review): time_zone is substituted into searchUrlFormat (declared
            // elsewhere in this file); its exact meaning depends on that format string.
            String time_zone = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("time_zone");
            if (metaResponse.getStatus() == 0 && metaResponse.getContent() != null){
                List<String> contents = (List<String>) metaResponse.getContent();
                for (String content : contents) {
                    CrawlerDomainKeys crawlerDomainKeys = JSON.parseObject(content, CrawlerDomainKeys.class);
                    String keyword = crawlerDomainKeys.getKeyword();
                    String searchUrl = String.format(searchUrlFormat, URLEncoder.encode(keyword, "utf-8"),0,0, time_zone);

                    // URLEncoder encodes spaces as '+'; the search endpoint expects %20
                    if (searchUrl.contains("+")){
                        searchUrl = searchUrl.replaceAll("\\+","%20");
                    }
                    CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(requestRecord)
                            .httpUrl(searchUrl)
                            .httpHeads(requestRecord.getHttpRequest().getHeaders())
                            .releaseTime(System.currentTimeMillis())
                            .copyBizTags()
                            .needWashed(true)
                            .notFilterRecord()
                            .build();
                    crawlerRequestRecord.tagsCreator().bizTags().addKeywords(keyword);
                    HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
                    // sign a fresh copy of the base headers so baseHeaders itself stays untouched
                    Map<String, String> headers = copyMap(baseHeaders);
                    genSign86Headers(searchUrl,headers);
                    httpRequest.setHeaders(headers);
                    allItemRecords.add(crawlerRequestRecord);
                }
            }
        } catch (Exception e) {
            logger.error(e.getMessage(),e);
        }
    }

    /**
     * Computes zhihu's anti-bot signature for {@code searchUrl} and, on success,
     * stores it into {@code headers} as {@code x-zse-96} ("2.0_" + sign).
     * The signed source is x-zse-93 + "+" + api-path + "+" + d_c0 cookie value,
     * hashed with MD5 and exchanged for a signature via the sign service.
     *
     * @param searchUrl full search URL; only the part after the origin is signed
     * @param headers   mutable header map providing cookie and x-zse-93, receives x-zse-96
     * @throws NoSuchAlgorithmException declared for the MD5 helper
     */
    private void genSign86Headers(String searchUrl, Map<String, String> headers) throws NoSuchAlgorithmException {
        String apiPath = removeOrigin(searchUrl);
        String dc0Value = getDC0(headers.get("cookie"));
        String zse93 = headers.get("x-zse-93");
        logger.info("93: [{}]", zse93);

        String signSource = StringUtils.joinWith("+", zse93, apiPath, dc0Value);
        logger.info("source: [{}]", signSource);

        String digest = md5(signSource.getBytes());
        logger.info("md5source: [{}]", digest);

        String sign = downloadSign(digest);
        if (StringUtils.isBlank(sign)) {
            logger.error("download sign code is empty");
            return;
        }
        logger.info("x-zse-96: [2.0_{}]", sign);
        headers.put("x-zse-96", "2.0_" + sign);
    }

    /**
     * Exchanges an MD5 digest for a zhihu x-zse-96 signature by calling the
     * external sign service at {@code signHostPrefix}.
     *
     * @param md5Code hex MD5 of the signature source string
     * @return the "sign" field of the service's JSON response, or "" on failure
     */
    private String downloadSign(String md5Code) {
        String sign86 = "";
        String url = signHostPrefix + "?code=" + md5Code;
        HttpConfig httpConfig = HttpConfig.me("sign");
        HttpRequest httpRequest = new HttpRequest();
        httpRequest.setUrl(url);
        try {
            HttpPage httpPage = downloader.download(httpRequest, httpConfig);
            JSONObject jsonObject = JSONObject.parseObject(httpPage.getRawText());
            sign86 = jsonObject.getString("sign");
        } catch (Exception e) {
            // log the throwable itself: the old code logged only e.getMessage()
            // and discarded the stack trace (and any context about which url failed)
            logger.error("download sign failed, url [{}]", url, e);
        }
        return sign86;
    }

    /**
     * Returns a shallow, mutable copy of {@code inMap}.
     *
     * @param inMap source map; may be null
     * @return a new {@link HashMap} with the same entries (empty if {@code inMap} is null)
     */
    public static Map<String, String> copyMap(Map<String,String> inMap){
        // HashMap's copy constructor performs the same entry-by-entry copy the
        // old manual loop did, presized to the source map; null now yields an
        // empty map instead of an NPE.
        return inMap == null ? new HashMap<>() : new HashMap<>(inMap);
    }

    /**
     * Re-enqueues the current request as a retry, tracking the attempt count in
     * the biz tag {@code download_retry_count} and giving up after 20 attempts.
     *
     * @param crawlerRequestRecords output list the retry record is appended to
     * @param crawlerRecord         the request that failed and should be retried
     * @param page                  the failed response page (logged when the limit is hit)
     */
    private void addCrawlerRecords(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){

        int count = 1;
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        if (crawlerBusinessTags.hasKVTag("download_retry_count")){
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag("download_retry_count").getVal();
            if (count >= 20){
                // fixed log-message typo ("he number" -> "the number")
                logger.error("zhihu search download the number of retries exceeds the limit,request url {},page rawText [{}]",
                        crawlerRecord.getHttpRequest().getUrl(),page.getRawText());
                return;
            }
        }

        // bump the attempt counter before re-enqueueing
        count++;
        crawlerBusinessTags.addCustomKV("download_retry_count",count);

        CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRecord)
                .httpRequest(crawlerRecord.getHttpRequest())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .notFilterRecord()
                .build();
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Overrides {@code crawlerProcess} so this script can drive its own
     * parse/internal-download pipeline (see parsePage) in addition to result
     * washing.
     *
     * @param context per-record crawl context
     */
    @Override
    public void crawlerProcess(CrawlerRecordContext context) {
        CrawlerRequestRecord record = context.getCrawlerRecord();

        // stage 1: expand the page into follow-up request records
        if (record.isNeedParsedPage()) {
            parsePage(context);
        }
        // stage 2: wash the page into result records
        if (record.isNeedWashPage()) {
            washResult(context);
        }
    }

    /**
     * Parses the current page into follow-up links, runs any internal
     * downloads they request, then registers the remaining links on the context.
     *
     * @param context per-record crawl context
     */
    private void parsePage(CrawlerRecordContext context){
        //parse page links
        List<CrawlerRequestRecord> links = parseLinks(context);
        // guard first: the old code only null-checked AFTER internalDownload had
        // already iterated the list, so a null return caused an NPE there
        if (links == null){
            logger.warn("record [{}] parsed links is null!",context.getCrawlerRecord());
            return;
        }
        //internal download
        internalDownload(context,links);
        //after internal download
        if (context.hasInternalDownloadLinks()){
            afterInternalDownload(context.getCrawlerRecord(),context.getInternalDownloadLinks(),links);
        }
        //add to context parsed links
        if (!links.isEmpty()){
            for (CrawlerRequestRecord requestRecord : links){
                context.addCrawlerRecord(requestRecord);
            }
        }else{
            logger.warn("record [{}] parsed links is null!",context.getCrawlerRecord());
        }
    }

    /**
     * Two-argument parse hook inherited from {@code CrawlerCommonScript}.
     * Returns null because this script overrides
     * {@link #crawlerProcess(CrawlerRecordContext)} and drives parsing through
     * the single-argument {@code parseLinks(context)} path instead — this
     * variant appears to be unused here. NOTE(review): confirm the base class
     * never invokes this overload directly before relying on that.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        return null;
    }

    /**
     * Pulls the links tagged {@code internalDownload} out of {@code links},
     * downloads each one synchronously with its own request/config, attaches
     * the page to the record, and publishes the batch on the context.
     * Records selected here are removed from {@code links}.
     *
     * @param context per-record crawl context (supplies the page downloader)
     * @param links   parsed follow-up links; mutated in place
     */
    private void internalDownload(CrawlerRecordContext context,List<CrawlerRequestRecord> links){
        // defensive guard: the old code iterated a possibly-null list
        if (links == null || links.isEmpty()){
            return;
        }
        //extract internal download links
        List<CrawlerRequestRecord> internalDownloadLinks = new ArrayList<>();
        for (CrawlerRequestRecord requestRecord : links){
            if (requestRecord.tagsCreator().requestTags().hasRequestType(CrawlerEnum.CrawlerRequestType.internalDownload)){
                internalDownloadLinks.add(requestRecord);
            }
        }
        // nothing tagged for internal download: done (the old null-check on a
        // freshly constructed list was dead code)
        if (internalDownloadLinks.isEmpty()){
            return;
        }
        links.removeAll(internalDownloadLinks);
        //download links
        for (CrawlerRequestRecord requestRecord : internalDownloadLinks){
            HttpPage page = context.getPageDownloader()
                    .download(requestRecord.getHttpRequest(),requestRecord.getHttpConfig());
            requestRecord.setInternalDownloadPage(page);
        }
        //set to context
        context.setInternalDownloadLinks(internalDownloadLinks);
    }

    /**
     * Washes the downloaded page into {@link CrawlerData} items and converts
     * each one into a {@link CrawlerResultRecord} (carrying filter info, tags
     * and the serialized data), then stores the batch on the context.
     *
     * @param context per-record crawl context
     */
    private void washResult(CrawlerRecordContext context){
        CrawlerRequestRecord record = context.getCrawlerRecord();
        HttpPage page = context.getPage();

        List<CrawlerResultRecord> resultRecords = new ArrayList<>();
        List<CrawlerData> washedData = washPage(record, page);
        if (washedData != null && !washedData.isEmpty()){
            for (CrawlerData data : washedData){
                // one result record per washed data item
                CrawlerResultRecord resultRecord = new CrawlerResultRecord();
                if (data.getFilter() != null){
                    // carry over filter info
                    resultRecord.setFilter(data.getFilter());
                    resultRecord.setFilterInfos(data.getFilterInfos());
                }
                resultRecord.setRecordKey(data.getDataId());
                resultRecord.setReleaseTime(data.getReleaseTime());
                // request tags
                resultRecord.setTags(data.getTags());
                // propagate the filter-pipeline flag
                resultRecord.setFilterPipelineResult(data.isFilterPipelineResult());
                resultRecord.setJsonStr(JSON.toJSONString(data));
                resultRecords.add(resultRecord);
            }
        }
        context.setRequestResults(resultRecords);
    }
}
