package com.chance.cc.crawler.development.scripts.dcdapp.dynamic;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;

public class DCDDynamicScript extends CrawlerCommonScript {
    // NOTE(review): SLF4J convention is `private static final` for loggers — confirm before changing.
    private Logger log = LoggerFactory.getLogger(DCDDynamicScript.class);

    // Crawler identity: the domain and site ("side") this script serves.
    private static final String DOMAIN = "dcdapp";
    private static final String SIDE = "dynamic";
    private static final String RECORD_AGAIN_REQUEST = "record_again_request";
    private static final String TURN_PAGE_ITEM_REQUEST = "turn_page_item_request";
    private static final String TAB_FILTER_SHOW_NAME = "tab_filter_show_name";// which sort order to fetch with
    private static final String TYPE = "type";// marks content kind: 232\d+ — 2309 Q&A (title type), 2310/2311/2312 article, 2313 video
    private static final String KEYS = "keys";
    private static final String COMMONFILTER = "comment_record_filter_info";
    private static final String DOMAIN_RESULT_JSON_RECORD_TAG = "domain_result_json";// initial record result field (back-fill path)


    // Fixed page URLs and API URL templates for dongchedi.com.
    private static final String DONGCHEDIURL = "https://www.dongchedi.com/";
    private static final String CHEXIURL = "https://www.dongchedi.com/auto/series/";// car-series page prefix
    private static final String USER = "https://www.dongchedi.com/user/";// author profile page prefix
    private static final String ARTICLE = "https://www.dongchedi.com/ugc/article/";// article detail page prefix
    private static final String DONGCHEDI = "https://www.dongchedi.com";
    private static final String COMMENTS = "https://www.dongchedi.com/motor/pc/ugc/detail/reply_list?count=10&comment_id=%s&cursor=%s&group_id=%s";// hidden-reply list API

    private static final String LIST = "https://www.dongchedi.com/motor/pc/ugc/community/cheyou_list?series_or_motor_id=%s&sort_type=%s&tab_name=%s&offset=%s&count=30";// community list API
    // URL regexes used for routing in parseLinks/washPage.
    private static final String CHEXI = "https://www.dongchedi.com/auto/series/\\S*";
    private static final String DYNAMIC = "https://www.dongchedi.com/community/\\S*";
    private static final String LISTURL = "https://www.dongchedi.com/motor/pc/ugc/community/cheyou_list\\?series_or_motor_id=\\S*";
    private static final String ARTICLEURL = "https://www.dongchedi.com/ugc/article/\\S*";
    private static final String articleUrl2 = "https://www.dongchedi.com/ugc/article/\\S*-\\S*";
    private static final String COMMENTSURL = "https://www.dongchedi.com/motor/pc/ugc/detail/reply_list?\\S*";
    private static final String USERURL = "https://www.dongchedi.com/user/\\S*";
    private static final String DONGCHEDI1 = "https://www.dongchedi.com/";// Dongchedi homepage URL
    /** Identifies which crawler domain this script handles. */
    @Override
    public String domain() {
        return DCDDynamicScript.DOMAIN;
    }
    /** Registers every URL pattern this script crawls, in the original order. */
    @Override
    public void initUrlRegulars() {
        String[] patterns = {CHEXI, DYNAMIC, LISTURL, ARTICLEURL, COMMENTSURL, USERURL, DONGCHEDI1};
        for (String pattern : patterns) {
            addUrlRegular(pattern);
        }
    }

    /** Accepts only records whose site tag matches this script's side ("dynamic"). */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        return SIDE.equals(crawlerRecord.tagsCreator().bizTags().site());
    }

    /**
     * Prepares the initial crawl requests for this domain.
     *
     * Two paths:
     *  1. Back-fill: the record carries a stored result-JSON tag — rebuild the
     *     item request from it (date-range permitting).
     *  2. Fresh start: pull the series keys from the keyword support record and
     *     queue one car-series page request per key.
     *
     * FIX: an empty (non-null) key list previously reached {@code keys.get(0)}
     * and threw IndexOutOfBoundsException; it now returns early like null does.
     */
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> supportSourceRecords){
        List<CrawlerRecord> prepareLinks = new ArrayList<>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        // NOTE(review): matches() treats DONGCHEDIURL as a regex ('.' is a wildcard);
        // it works for this literal but equals() would be the precise check — confirm.
        if (requestUrl.matches(DONGCHEDIURL) && crawlerRequestRecord.tagsCreator().bizTags().hasKVTag(DOMAIN_RESULT_JSON_RECORD_TAG)) {
            // Back-fill path: rebuild the item request from the stored result JSON.
            KVTag domainResultJson = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(DOMAIN_RESULT_JSON_RECORD_TAG);
            CrawlerDomainUrls crawlerDomainUrls = JSON.parseObject(String.valueOf(domainResultJson.getVal()), CrawlerDomainUrls.class);
            Json urlJson = new Json(crawlerDomainUrls.getUrl());
            String itemUrl = urlJson.jsonPath($_type + ".itemUrl").get();
            log.info(itemUrl);
            long releaseTimeToLong = Long.parseLong(urlJson.jsonPath($_type + ".releaseTimeToLong").get());
            if (!isDateRange(crawlerRequestRecord, releaseTimeToLong)) {
                return prepareLinks;// outside the allowed date window
            }
            crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().removeLabelTag(DOMAIN_RESULT_JSON_RECORD_TAG);
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            prepareLinks.add(record);
        }

        if (!requestUrl.matches(DONGCHEDIURL)) {
            return prepareLinks;
        }
        if (supportSourceRecords != null) {
            // Harvest series keys from the keyword support page(s).
            for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
                if (supportSourceRecord.getHttpRequest().getUrl().contains("keys")) {
                    log.info("采集关键字 success");
                    this.initKeyWord(crawlerRequestRecord, supportSourceRecord);
                }
            }
            List<String> keys = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagListVal(KEYS);
            crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(KEYS, null);
            // FIX: guard against an empty list, not just null, before keys.get(0).
            if (keys == null || keys.isEmpty()) {
                return prepareLinks;
            }
            log.info(keys.get(0));
            for (String key : keys) {
                CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(CHEXIURL + key)// car-series page URL
                        .releaseTime(System.currentTimeMillis())
                        .copyResultTags()
                        .copyBizTags()
                        .build();
                prepareLinks.add(record);
            }
        }
        return prepareLinks;
    }
    //取出代表不同的车系key
    // Extracts the series keys (one per car series) from the downloaded keyword
    // support page and stores them as the KEYS biz tag.
    private void initKeyWord(CrawlerRequestRecord crawlerRequestRecord, CrawlerRequestRecord supportSourceRecord) {
        HttpPage keywordPage = supportSourceRecord.getInternalDownloadPage();
        Json pageJson = keywordPage.getJson();
        String status = pageJson.jsonPath($_type + ".msg").get();
        if (!"success".equals(status)) {
            log.error("keyword page [{}] download error!", keywordPage.getRequest().getUrl());
            return;
        }
        List<String> seriesKeys = new ArrayList<>();
        for (String entry : pageJson.jsonPath($_type + ".content").all()) {
            seriesKeys.add(JSONObject.parseObject(entry).getString("keyword"));
        }
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(KEYS, seriesKeys);
    }



    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        if((page.getStatusCode() != 200) || (!page.isDownloadSuccess())){
            log.error("download page !=200 or page==null",url,page.getStatusCode());
            if(page.getStatusCode() != 404){//重新下载
                this.recordAgainRequest(crawlerRecord,parseLinks);
                return parseLinks;
            }
        }
        if(url.matches(CHEXI)){//解析车系url
            crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap",null);
            this.CheXiParseLinks(crawlerRecord,page,parseLinks);
        }
        if (url.matches(DYNAMIC)){//解析 模块 url
            this.dynamicParseLinks(crawlerRecord,page,parseLinks);
        }
        if(url.matches(LISTURL)){//解析列表明细页url
            this.listPageDynamicParseLinks(crawlerRecord,page,parseLinks);
        }
        if(url.matches(ARTICLEURL)){//解析文章url
            this.parseLinkArticle(crawlerRecord,page,parseLinks);
        }
        if(url.matches(COMMENTSURL)){//解析隐藏作者的url
            this.parseLinkAuthor(crawlerRecord,page,parseLinks);
        }
        return parseLinks;
    }
    // Walks a hidden-reply list page and queues an author-profile internal
    // download for every commenter not already present in the shared authorMap.
    private void parseLinkAuthor(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        KVTag mapTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("authorMap");
        Map<String, String> knownAuthors = (mapTag == null) ? new HashMap<>() : (Map<String, String>) mapTag.getVal();
        List<String> dataNodes = page.getJson().jsonPath($_type + ".data").all();
        if (dataNodes.size() == 0) {
            return;
        }
        List<String> replies = page.getJson().jsonPath($_type + ".data.reply_list").all();
        if (replies == null) {
            return;// no hidden replies -> no authors to fetch
        }
        for (String reply : replies) {
            JSONObject profile = JSONObject.parseObject(reply).getJSONObject("profile_info");
            String userId = (String) profile.get("user_id");
            if (knownAuthors.get(userId) != null) {
                continue;// follower count already recorded
            }
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(USER + userId)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            record.setNeedWashPage(false);
            record.setNeedParsedPage(false);
            parseLinks.add(record);
        }
    }
    //解析文章的url
    /**
     * Parses an article detail page: queues internal downloads for the article
     * author and for every comment/reply author not yet in authorMap, triggers
     * hidden-reply fetches, and queues the next comment page when one exists.
     *
     * NOTE(review): substring(6) below assumes author hrefs start with "/user/"
     * — confirm against the live page markup.
     */
    private void parseLinkArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String url = crawlerRecord.getHttpRequest().getUrl();// NOTE(review): unused local
        KVTag tag =  crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("authorMap");
        Map<String,String> authorMap = null;
        if (tag == null){
             authorMap = new HashMap<>();
        }else{
             authorMap  = (Map<String, String>) tag.getVal();
        }

        //1. collect the article author, then the authors of its comments
        String author_1 = page.getHtml().xpath("//*[@id=\"__next\"]/div/div/div[2]/div[1]/div[1]/div[1]/div/div[1]/a/@href").get();
        String authorId_1 = author_1.substring(6);
        if (authorMap.get(authorId_1) == null){
            author_1 = DONGCHEDI + author_1;// article author URL
            CrawlerRequestRecord record_1 = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(author_1)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            parseLinks.add(record_1);
        }
        String text_1 = page.getHtml().xpath("//*[@id=\"__next\"]/div[1]/div/div[2]/div[1]/ul/li//text()").get();
        if (text_1 == null){return;}// no comments on this article
        //2. collect the author URL of every top-level comment
        List<Selectable> nodes = page.getHtml().xpath("//*[@id=\"__next\"]/div[1]/div/div[2]/div[1]/ul/li").nodes();
        // count tracks the comment's position; it is passed to hiddenAuthorComments
        // to locate the matching entry in the page's comment_data array.
        int count = 0;
        for (Selectable node : nodes){
            String author_2 = node.xpath("./div/div[1]/div/div[1]/a/@href").get();// comment author URL
            String authorId_2 = author_2.substring(6);
            if(authorMap.get(authorId_2) == null){
                author_2 = DONGCHEDI + author_2;
                CrawlerRequestRecord record_2 = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRecord)
                        .httpUrl(author_2)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .copyResultTags()
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                        .build();
                parseLinks.add(record_2);
            }
            String text_2 = node.xpath("./div/div[2]/ul/li//text()").get();
            if (text_2 == null){
                ++count;
                continue;
            }// this comment has no visible replies
            List<Selectable> nodes1 = node.xpath("./div/div[2]/ul/li").nodes();// authors of visible replies to this comment
            String hidden = node.xpath("./div/div[2]/div[2]/div//text()").get();// marker: collapsed/hidden replies exist
            if (hidden != null){// hidden replies exist — fetch them via the reply_list API
                this.hiddenAuthorComments(crawlerRecord,page,parseLinks,count);
            }
            ++count;
            for(Selectable node1 : nodes1){
                String author_3 = node1.xpath("./section/div/div/a/@href").get();
                String authorId_3 = author_3.substring(6);
                if (authorMap.get(authorId_3)==null){
                    author_3 = DONGCHEDI + author_3;
                    CrawlerRequestRecord record_3 = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRecord)
                            .httpUrl(author_3)
                            .releaseTime(System.currentTimeMillis())
                            .copyBizTags()
                            .copyResultTags()
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                            .build();
                    parseLinks.add(record_3);
                }
            }
        }
        // next-page link of the comment list
        List<Selectable> node3 = page.getHtml().xpath("//*[@id=\"__next\"]/div[1]/div/div[2]/div[1]/div[3]/ul/li").nodes();
        if (node3.size()==0){return;}// no pagination at all — reached the end
        String nextPageUrl;
        if(node3.size() == 3){ return;}// exactly three items means no "next" entry
        else{
            nextPageUrl = node3.get(node3.size()-1).xpath("./a/@href").get();
        }
        if( nextPageUrl!= null) {
        nextPageUrl = DONGCHEDI + nextPageUrl;
        CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        // tag the record so washPage skips interaction washing on turn pages
        nextPageRecord.tagsCreator().bizTags().addCustomKV("turn_page","turn_page");
        parseLinks.add(nextPageRecord);
        }
    }
    /*
    * 清洗隐藏作者
    * */
    /**
     * For the comment at position {@code count} on an article page, builds the
     * request that fetches its hidden (collapsed) reply list.
     *
     * FIX: elements of a parsed fastjson JSONArray are JSONObjects, not Strings,
     * so the original {@code (String) s} cast followed by parseObject threw
     * ClassCastException; use {@code getJSONArray(...).getJSONObject(count)}
     * instead. Also removes the unused {@code size} local and the manual index
     * loop, and guards against {@code count} being out of range (the original
     * loop silently produced nothing in that case).
     */
    private void hiddenAuthorComments(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks, int count) {
        String url = crawlerRecord.getHttpRequest().getUrl();
        String seriesId = url.substring(url.lastIndexOf("/") + 1);// series id from the URL tail
        String json = page.getHtml().xpath("//*[@id=\"__NEXT_DATA__\"]//text()").get();
        JSONArray commentData = JSONObject.parseObject(json)
                .getJSONObject("props")
                .getJSONObject("pageProps")
                .getJSONObject("comment")
                .getJSONArray("comment_data");
        if (commentData == null || count < 0 || count >= commentData.size()) {
            return;// mirrors the original loop, which emitted nothing when count was out of range
        }
        JSONObject data = commentData.getJSONObject(count);
        String commentId = data.getString("comment_id_str");// unique id of this comment
        String cursor = data.getJSONObject("reply_data").getString("cursor");
        String commentsUrl = String.format(COMMENTS, commentId, cursor, seriesId);// hidden-reply list URL
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(commentsUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parseLinks.add(record);
    }

    //解析作者 粉丝数  以及隐藏评论中的作者和粉丝数
    /**
     * After author-profile internal downloads finish, records each newly seen
     * author's follower count into the shared authorMap biz tag.
     *
     * FIX: removes the unused local {@code url1} and collapses the duplicated
     * {@code authorMap.put(...)} that appeared in both branches of the
     * {@code tag == null} check. Behavior is unchanged: the map is attached as
     * a biz tag only when it was freshly created here.
     */
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        KVTag tag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("authorMap");
        Map<String, String> authorMap = (tag == null) ? new HashMap<>() : (Map<String, String>) tag.getVal();

        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            String url = internalDownloadRecord.getHttpRequest().getUrl();
            if (!url.matches(USERURL)) {
                continue;
            }
            String authorId = url.split("/")[4];// author id segment of the profile URL
            Html html = internalDownloadRecord.getInternalDownloadPage().getHtml();
            String authorFollow = html.xpath("//*[@id=\"__next\"]/div[1]/div[2]/div/div[1]/div[2]/header/div/section[2]/a[1]/span[1]").get();// follower count
            if (authorMap.get(authorId) != null) {
                continue;// already recorded
            }
            if (StringUtils.isBlank(authorFollow)) {
                authorFollow = "0";// page gave no count — default to zero
            }
            authorMap.put(authorId, authorFollow);
            if (tag == null) {
                // first author found: attach the freshly created map as a biz tag
                crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap", authorMap);
            }
        }
    }

    /*
    * 解析下一页
    * */
    /**
     * Parses one page of the community list feed: queues every article whose
     * timestamp is in the allowed date range, then queues the next list page
     * when one exists and the range was not exceeded.
     *
     * FIX: {@code has_more} and {@code latest_comment_time} may be absent, so
     * {@code x.equals(const)} could NPE — use constant-first equals.
     */
    private void listPageDynamicParseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String sort_type = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("sort_type");// sort order
        String tab_name = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("tab_name");// community tab
        String motor_id = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("motor_id");
        String has_more = page.getJson().jsonPath($_type + ".data.has_more").get();
        if (!"true".equals(has_more)) {// FIX: null-safe — no further pages
            return;
        }
        String next_offset = page.getJson().jsonPath($_type + ".data.next_offset").get();
        List<String> list = page.getJson().jsonPath($_type + ".data.cheyou_list").all();
        String display_time = null;
        Map<String, String> map = new HashMap<>();
        for (String l : list) {
            JSONObject dataList = JSONObject.parseObject(l);
            String type = (String) dataList.get("article_sub_type");// article / Q&A / video marker
            String unique_id = (String) dataList.get("gid_str");
            String articleUrl = ARTICLE + unique_id;
            display_time = (String) dataList.get("latest_comment_time");// publication/last-comment time
            if ("0".equals(display_time)) {// FIX: null-safe constant-first equals
                display_time = dataList.getString("display_time");
            }
            display_time = display_time + "000";// seconds -> milliseconds
            if (!this.isDateRange(crawlerRecord, Long.valueOf(display_time))) {// outside the window: stop paging
                display_time = null;
                break;
            }
            map.put(articleUrl, display_time + "/" + type);
            crawlerRecord.tagsCreator().bizTags().addCustomKV("module", tab_name);// which tab this came from
            this.parsePageArticle(articleUrl, display_time, crawlerRecord, parseLinks, type);// queue article detail page
        }
        // NOTE(review): stashing articleUrl -> "time/type" pairs into the request
        // HEADERS looks like a data smuggle rather than real HTTP headers — confirm
        // downstream usage before changing.
        crawlerRecord.getHttpRequest().setHeaders(map);
        if (display_time == null) {
            return;// range exceeded or list empty — do not turn the page
        }
        String turnPage = String.format(LIST, motor_id, sort_type, tab_name, next_offset);
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(turnPage)
                .releaseTime(Long.valueOf(display_time))
                .copyResultTags()
                .copyBizTags()
                .build();
        parseLinks.add(record);
    }
    /**
     * 判断是否在时间范围内
     * @param crawlerRequestRecord
     * @return
     */
    /**
     * Checks whether releaseTimeToLong falls inside the record's configured
     * date-range filter window.
     * @param crawlerRequestRecord record carrying the filter configuration
     * @param releaseTimeToLong timestamp (ms) to test; 0 never passes a range filter
     * @return true when in range, or when no date-range filtering is configured
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord, Long releaseTimeToLong) {
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        if (filter != CrawlerEnum.CrawlerRecordFilter.keyOrDateRange && filter != CrawlerEnum.CrawlerRecordFilter.dateRange) {
            return true;// no date filtering configured — everything passes
        }
        Long startTime = null;
        Long endTime = null;
        for (FilterInfo filterInfo : crawlerRequestRecord.getFilterInfos()) {
            if (filterInfo.getFilter() != CrawlerEnum.CrawlerRecordFilter.dateRange) {
                continue;
            }
            long[] dateAllowRange = filterInfo.getDateAllowRange();
            int hourFromNow = filterInfo.getHourFromNow();
            if (dateAllowRange != null) {
                // explicit [start, end] window
                startTime = dateAllowRange[0];
                endTime = dateAllowRange[1];
            } else if (hourFromNow != 0) {
                endTime = System.currentTimeMillis() - 60000;// now minus one minute
                startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
            }
        }
        return startTime != null && releaseTimeToLong != 0
                && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime;
    }

    /*
    *
    * */
    // Queues a single article detail page, tagging it with its content type
    // (article / Q&A / video) for the downstream parsers.
    private void parsePageArticle(String articleUrl, String time, CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> parseLinks, String type) {
        CrawlerRequestRecord articleRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(articleUrl)
                .releaseTime(Long.valueOf(time))
                .copyBizTags()
                .copyResultTags()
                .build();
        articleRecord.tagsCreator().bizTags().addCustomKV(TYPE, type);
        parseLinks.add(articleRecord);
    }

    /*
    * 解析动态 第一页url
    * */
    /**
     * Parses the community ("dynamic") landing page: for each configured
     * tab/sort pair, queues the first page of that tab's list feed.
     *
     * FIX: the __NEXT_DATA__ blob and the motor_id extracted from it are
     * page-level and loop-invariant — parse once instead of on every iteration.
     */
    private void dynamicParseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        KVTag moduleTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("moduleTag");
        KVTag moduleSort = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("moduleSort");
        List<String> tabNames = (List<String>) moduleTag.getVal();// tab labels
        List<String> sortTypes = (List<String>) moduleSort.getVal();// sort orders, parallel to tabNames
        String json = page.getHtml().xpath("//*[@id=\"__NEXT_DATA__\"]").get();
        String motor_id = (String) JSONObject.parseObject(json)
                .getJSONObject("props")
                .getJSONObject("pageProps")
                .getJSONObject("cheyouHead")
                .get("motor_id");
        for (int i = 0; i < tabNames.size(); i++) {
            String sort_type = sortTypes.get(i);// publish/sort order
            String tab_name = tabNames.get(i);  // tab label
            String listUrl = String.format(LIST, motor_id, sort_type, tab_name, "0");// first page (offset 0)

            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(listUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            // carry the pagination parameters so the list parser can build the next page URL
            record.tagsCreator().bizTags().addCustomKV("sort_type", sort_type);
            record.tagsCreator().bizTags().addCustomKV("tab_name", tab_name);
            record.tagsCreator().bizTags().addCustomKV("motor_id", motor_id);
            parseLinks.add(record);
        }
    }



    /*
    * 解析出车友圈页面的url
    * */
    /*
     * Parses a car-series page: extracts brand / series metadata and queues the
     * series' community ("cheyou") page for crawling.
     * (Name kept as-is: it is called from parseLinks.)
     */
    private void CheXiParseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        Map<String, String> freshAuthorMap = new HashMap<>();
        crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap", freshAuthorMap);
        String pageUrl = crawlerRecord.getHttpRequest().getUrl();
        String communityUrl = page.getHtml().xpath("//*[@id=\"anchor-nav\"]/ul/li[4]/a/@href").get();
        if (StringUtils.isBlank(communityUrl)) {
            // fallback for the layout variant without the <ul> wrapper
            communityUrl = page.getHtml().xpath("//*[@id=\"anchor-nav\"]/li[4]/a/@href").get();
        }
        communityUrl = DONGCHEDI + communityUrl;
        String brand = page.getHtml().xpath("//*[@id=\"__next\"]/div/div[2]/div/div[1]/p/span[3]/a/text()").get();// brand name
        String seriesName = page.getHtml().xpath("//*[@id=\"__next\"]/div/div[2]/div/div[1]/p/span[4]//text()").get();// series name
        String seriesId = pageUrl.substring(pageUrl.lastIndexOf("/") + 1);
        Map<String, String> seriesInfo = new HashMap<>();
        seriesInfo.put("brand", brand);
        seriesInfo.put("seriesName", seriesName);
        seriesInfo.put("seriesId", seriesId);
        List<Map<String, String>> seriesList = new ArrayList<>();
        seriesList.add(seriesInfo);
        CrawlerRequestRecord communityRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(communityUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        communityRecord.tagsCreator().bizTags().addCustomKV(AutoVMCommonField.Tag_Field_Series, seriesList);
        communityRecord.tagsCreator().bizTags().addCustomKV(AutoVMCommonField.Tag_Field_Brand, brand);
        communityRecord.tagsCreator().bizTags().addCustomKV("seriesId", seriesId);
        parseLinks.add(communityRecord);
    }


    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerData = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.article)){
            if (crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("accumulateUrl").equals("accumulateUrl")) {
                if (url.matches(LISTURL)) {
                    this.washListPage(crawlerRecord, page, crawlerData);
                }
            }else{
                if(url.matches(ARTICLEURL) && !url.matches(articleUrl2)){
                    this.washArticle(crawlerRecord,page,crawlerData);//清洗文章
                }
            }

        }
        if(crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.interaction)){
            if (url.matches(ARTICLEURL )&& (crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("turn_page")==null)){//清洗文章内容
                    this.washInteraction(crawlerRecord,page,crawlerData);//清洗互动量
            }
        }
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.comment)){
            if (url.matches(ARTICLEURL)){
                this.washArticleFirstComment(crawlerRecord,page,crawlerData);//清洗文章第一页评论
            }
            if (url.matches(COMMENTSURL)){//清洗文章下一页评论
                this.washArticleTurnPageComment(crawlerRecord,page,crawlerData);
            }
        }

        return crawlerData;
    }
    /*
    * //清洗文章下一页评论
    * */
    /*
     * Washes one page of "load more" comment replies (the reply_list JSON feed)
     * into comment CrawlerData items.
     *
     * NOTE(review): the authorMap biz tag is nulled right before each build and
     * restored right after — presumably to keep the map out of the tags copied
     * onto the emitted data; confirm this is intentional before refactoring.
     */
    private void washArticleTurnPageComment(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerData) {
        this.module(crawlerRecord);
        List<String> all = page.getJson().jsonPath($_type + ".data.reply_list").all();
        KVTag tag =  crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("authorMap");
        Map<String,String> authorMap = null;
        if (tag == null){
            authorMap = new HashMap<>();
        }else{
            authorMap  = (Map<String, String>) tag.getVal();
        }

        for (String data : all){
            JSONObject jsonObject = JSONObject.parseObject(data);
            String reply_id = (String) jsonObject.get("reply_id");// reply id
            String group_id = (String) jsonObject.get("group_id");// article id
            JSONObject profile_info = jsonObject.getJSONObject("profile_info");
            String user_id = (String) profile_info.get("user_id");// author id
            String follows = authorMap.get(user_id);
            if (follows == null){follows = "0";}// unknown author -> zero followers
            String name = (String) profile_info.get("name");// author name
            String text = (String) jsonObject.get("text");// reply body
            String create_time = (String) jsonObject.get("create_time");// reply time (seconds)
            if (!isDateRange(crawlerRecord,Long.valueOf(create_time+"000"))){// keep only replies inside the date window
                return;
            }
            JSONArray image_urls = jsonObject.getJSONArray("image_urls");// image URLs attached to the reply
            StringBuffer content = new StringBuffer();
            if (image_urls != null){
                // join image URLs with the \x01 separator sequence
                for (Object img : image_urls){
                    JSONObject images = JSONObject.parseObject((String) img);
                    String url = (String) images.get("url");
                    content.append(url).append("\\x01");
                }
            }
            crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap",null);
            // dedup/filter configuration for comment records
            KVTag filter_info = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(COMMONFILTER);
            CrawlerRecord filterInfoRecord = JSON.parseObject((String) filter_info.getVal(), CrawlerRecord.class);
            CrawlerData crawlerData1 = CrawlerData.builder()
                    .data(crawlerRecord,page)
                    .dataId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.comment,reply_id))
                    .parentId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.article,group_id))
                    .releaseTime(Long.valueOf(create_time+"000"))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                    .addContentKV(AICCommonField.Field_Author,name)
                    .addContentKV(AICCommonField.Field_Author_Id,user_id)
                    .addContentKV(AutoVMCommonField.Field_Author_Follows,follows)
                    .addContentKV(AICCommonField.Field_Images, String.valueOf(content))
                    .addContentKV(AICCommonField.Field_Content,text)
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap",authorMap);
            crawlerData1.setFilter(filterInfoRecord.getFilter());
            crawlerData1.setFilterInfos(filterInfoRecord.getFilterInfos());
            crawlerData1.setFilterPipelineResult(true);
            crawlerData.add(crawlerData1);
        }
    }

    // Wash the first page of article comments out of the embedded Next.js state blob.
    private void washArticleFirstComment(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerData) {
        // The page ships its data as JSON inside the __NEXT_DATA__ script element.
        String nextData = page.getHtml().xpath("//*[@id=\"__NEXT_DATA__\"]//text()").get();
        JSONObject root = JSONObject.parseObject(nextData);
        JSONObject commentNode = root.getJSONObject("props")
                .getJSONObject("pageProps")
                .getJSONObject("comment");
        JSONArray firstPageComments = commentNode.getJSONArray("comment_data");
        // Each entry is washed individually as a first-page comment.
        for (Object entry : firstPageComments) {
            this.commentsFirstAll(entry, crawlerRecord, page, crawlerData);
        }
    }
    /*
    * Wash one first-page comment: emits a comment CrawlerData and a matching
    * interaction CrawlerData, and recursively washes any replies nested in it.
    * Comments outside the configured date range are dropped (isDateRange).
    * */
    private void commentsFirstAll(Object data, CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData>  crawlerData) {
        this.module(crawlerRecord);
        // Author-id -> follower-count map stashed on the business tags by an earlier step.
        KVTag tag =  crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("authorMap");
        Map<String,String> authorMap = null;
        if (tag == null){
            authorMap = new HashMap<>();
        }else{
            authorMap  = (Map<String, String>) tag.getVal();
        }

        JSONObject jsonObject1 = JSONObject.parseObject((String) data);// the comment payload (serialized JSON)
        String group_id = (String) jsonObject1.get("group_id_str");// parent article id
        String comment_id = (String) jsonObject1.get("comment_id");// id of this comment
        JSONObject profile_info = jsonObject1.getJSONObject("profile_info");
        String user_id = (String) profile_info.get("user_id");// author id
        String name = (String) profile_info.get("name");// author name
        String follows = null;
        if (authorMap != null){
            follows = authorMap.get(user_id);// author follower count
        }
        if (follows == null){follows = "0";}// default when the author is not in the map
        String text = (String) jsonObject1.get("text");// comment body
        String create_time = (String) jsonObject1.get("create_time");// comment publish time (epoch seconds as string)
        JSONObject reply_data = jsonObject1.getJSONObject("reply_data");// nested replies to this comment
        String count = "0";
        String total_count = "0";
        if (reply_data !=null){
            JSONArray reply_list = reply_data.getJSONArray("reply_list");
            if (reply_list != null){
                for (Object reply : reply_list){
                    this.commentsReply(reply,crawlerRecord,page,crawlerData);// wash each nested reply
                }
            }
            // NOTE(review): original note called total_count the "like count", but the key
            // name suggests a total reply count — confirm against the upstream API.
            total_count = (String) reply_data.get("total_count");
            count = (String) reply_data.get("count");// number of replies to this comment
        }
        JSONArray image_urls = jsonObject1.getJSONArray("image_urls");// image urls attached to the comment
        StringBuffer content = new StringBuffer();
        if (image_urls != null){
            for (Object img : image_urls){
                JSONObject images = JSONObject.parseObject((String) img);
                String url = (String) images.get("url");
                // NOTE(review): "\\x01" appends the literal text \x01, not the 0x01 byte —
                // confirm downstream expects this separator form.
                content.append(url).append("\\x01");
            }
        }
        // Temporarily clear the authorMap tag so it is not serialized into the data record.
        crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap",null);
        // Fetch the comment dedup/filter info stored under COMMONFILTER.
        KVTag filter_info = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(COMMONFILTER);
        CrawlerRecord filterInfoRecord = JSON.parseObject((String) filter_info.getVal(), CrawlerRecord.class);
        crawlerRecord.setFilter(filterInfoRecord.getFilter());
        crawlerRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
        if (!isDateRange(crawlerRecord,Long.valueOf(create_time+"000"))){// keep only comments inside the date range
            return;
        }
        CrawlerData crawlerData1 = CrawlerData.builder()
                .data(crawlerRecord,page)
                .dataId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.comment,comment_id))
                .parentId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.article,group_id))
                .releaseTime(Long.valueOf(create_time+"000"))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                .addContentKV(AICCommonField.Field_Author,name)
                .addContentKV(AICCommonField.Field_Author_Id,user_id)
                .addContentKV(AutoVMCommonField.Field_Author_Follows,follows)
                .addContentKV(AICCommonField.Field_Images, String.valueOf(content))
                .addContentKV(AICCommonField.Field_Content,text)
                .flowInPipelineTag("kafka")
                .build();
        // Restore the authorMap tag for subsequent washes.
        crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap",authorMap);
        crawlerData1.setFilterPipelineResult(true);
        crawlerData.add(crawlerData1);
        // interaction data for this comment
        // NOTE(review): unlike commentsReply, filter/filterInfos are not copied onto
        // crawlerData1/crawlerData2 here — confirm whether that is intentional.
        CrawlerData crawlerData2 = CrawlerData.builder()
                .data(crawlerRecord,page)
                .dataId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.interaction,comment_id))
                .parentId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.comment,comment_id))
                .releaseTime(Long.valueOf(create_time+"000"))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                .addContentKV(AICCommonField.Field_I_Comments,count)
                .addContentKV(AICCommonField.Field_I_Likes,total_count)
                .flowInPipelineTag("kafka")
                .build();
        // NOTE(review): this restore is redundant — the tag was already restored above.
        crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap",authorMap);
        crawlerData2.setFilterPipelineResult(true);
        crawlerData.add(crawlerData2);


    }
    /*
    * Wash one reply nested inside a comment: emits a comment CrawlerData and a
    * matching interaction CrawlerData. Replies outside the configured date
    * range are dropped (isDateRange).
    * */
    private void commentsReply(Object reply, CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerData) {
        this.module(crawlerRecord);
        // Author-id -> follower-count map stashed on the business tags by an earlier step.
        KVTag tag =  crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("authorMap");
        Map<String,String> authorMap = null;
        if (tag == null){
            authorMap = new HashMap<>();
        }else{
            authorMap  = (Map<String, String>) tag.getVal();
        }
        JSONObject jsonObject = JSONObject.parseObject((String) reply);
        String reply_id = (String) jsonObject.get("reply_id");// id of this reply
        String group_id = (String) jsonObject.get("group_id_str");// article id
        JSONObject profile_info = jsonObject.getJSONObject("profile_info");
        String user_id = (String) profile_info.get("user_id");// author id
        String name = (String) profile_info.get("name");// author name
        String follows = null;
        if (authorMap != null){
            follows = authorMap.get(user_id);// author follower count
        }
        if (follows == null){follows = "0";}// default when the author is not in the map
        String text = (String) jsonObject.get("text");// reply body
        String digg_count = (String) jsonObject.get("digg_count");// like count of the reply
        String create_time = (String) jsonObject.get("create_time");// publish time (epoch seconds as string)
        JSONArray image_urls = jsonObject.getJSONArray("image_urls");// image urls attached to the reply
        StringBuffer content = new StringBuffer();// concatenated image urls
        if (image_urls != null){
            for (Object img : image_urls){
                JSONObject images = JSONObject.parseObject((String) img);
                String url = (String) images.get("url");
                // NOTE(review): "\\x01" appends the literal text \x01, not the 0x01 byte —
                // confirm downstream expects this separator form.
                content.append(url).append("\\x01");
            }
        }
        // Temporarily clear the authorMap tag so it is not serialized into the data record.
        crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap",null);
        // Fetch the dedup/filter info stored under COMMONFILTER.
        KVTag filter_info = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(COMMONFILTER);
        CrawlerRecord filterInfoRecord = JSON.parseObject((String) filter_info.getVal(), CrawlerRecord.class);
        crawlerRecord.setFilter(filterInfoRecord.getFilter());
        crawlerRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
        if (!isDateRange(crawlerRecord,Long.valueOf(create_time+"000"))){// keep only replies inside the date range
            return;
        }
        CrawlerData crawlerData1 = CrawlerData.builder()
                .data(crawlerRecord,page)
                .dataId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.comment,reply_id))
                .parentId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.article,group_id))
                .releaseTime(Long.valueOf(create_time+"000"))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                .addContentKV(AICCommonField.Field_Author,name)
                .addContentKV(AICCommonField.Field_Author_Id,user_id)
                .addContentKV(AutoVMCommonField.Field_Author_Follows,follows)
                .addContentKV(AICCommonField.Field_Images, String.valueOf(content))
                .addContentKV(AICCommonField.Field_Content,text)
                .flowInPipelineTag("kafka")
                .build();
        // Restore the authorMap tag for subsequent washes.
        crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap",authorMap);
        crawlerData1.setFilterPipelineResult(true);
        crawlerData1.setFilter(filterInfoRecord.getFilter());
        crawlerData1.setFilterInfos(filterInfoRecord.getFilterInfos());
        crawlerData.add(crawlerData1);

        // NOTE(review): the interaction's parentId points at the article here, while
        // commentsFirstAll parents its interaction on the comment — confirm which is intended.
        CrawlerData crawlerData2 = CrawlerData.builder()
                .data(crawlerRecord,page)
                .dataId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.interaction,reply_id))
                .parentId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.article,group_id))
                .releaseTime(Long.valueOf(create_time+"000"))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                .addContentKV(AICCommonField.Field_I_Likes,digg_count)
                .flowInPipelineTag("kafka")
                .build();
        crawlerData2.setFilterPipelineResult(true);
        crawlerData2.setFilter(filterInfoRecord.getFilter());
        crawlerData2.setFilterInfos(filterInfoRecord.getFilterInfos());
        crawlerData.add(crawlerData2);
    }


    // Wash the interaction counts (comment / like totals) for an article page.
    private void washInteraction(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData>  crawlerData) {
        this.module(crawlerRecord);
        // The article payload lives inside the __NEXT_DATA__ state blob.
        String nextData = page.getHtml().xpath("//*[@id=\"__NEXT_DATA__\"]//text()").get();
        JSONObject payload = JSONObject.parseObject(nextData)
                .getJSONObject("props")
                .getJSONObject("pageProps")
                .getJSONObject("articleData")
                .getJSONObject("data");
        String totalComments = (String) payload.get("comment_count");// total comment count
        String totalLikes = (String) payload.get("digg_count");// total like count
        String articleId = (String) payload.get("group_id_str");// article id
        String publishMillis = ((String) payload.get("created_time")) + "000";// epoch seconds -> millis
        CrawlerData interaction = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction, articleId))
                .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, articleId))
                .releaseTime(Long.valueOf(publishMillis))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                .addContentKV(AICCommonField.Field_I_Comments, totalComments)
                .addContentKV(AICCommonField.Field_I_Likes, totalLikes)
                .flowInPipelineTag("kafka")
                .build();
        interaction.setFilterPipelineResult(true);
        crawlerData.add(interaction);
    }

    /*
    * Wash the article content out of an article detail page and emit one
    * article CrawlerData. Reads the __NEXT_DATA__ JSON blob embedded in the page.
    * */
    private void washArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerData) {
        this.module(crawlerRecord);
        // Author-id -> follower-count map stashed on the business tags by an earlier step.
        KVTag tag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("authorMap");
        Map<String, String> authorMap = (tag == null) ? new HashMap<>() : (Map<String, String>) tag.getVal();

        String json = page.getHtml().xpath("//*[@id=\"__NEXT_DATA__\"]//text()").get();
        JSONObject data = JSONObject.parseObject(json)
                .getJSONObject("props")
                .getJSONObject("pageProps")
                .getJSONObject("articleData")
                .getJSONObject("data");
        String article_id = (String) data.get("group_id_str");// article id
        // Concatenate image urls with the same literal "\x01" separator the comment washers use.
        JSONArray image_urls = data.getJSONArray("image_urls");
        StringBuilder content = new StringBuilder();// StringBuilder: no synchronization needed here
        if (image_urls != null){
            for (Object img : image_urls){
                JSONObject image = JSONObject.parseObject((String) img);
                content.append((String) image.get("url")).append("\\x01");
            }
        }
        String title = (String) data.get("motor_title");// article title
        String contents = (String) data.get("content");// article body, may contain HTML
        // Strip HTML by round-tripping the body through the Html selector, then restore the page html.
        // FIX: null guard — the "content" key may be absent; the original NPE'd on contains().
        if (contents != null && contents.contains("</p>")){
            Html html = page.getHtml();
            page.setHtml(Html.create(contents));
            contents = page.getHtml().xpath("//text()").get();
            page.setHtml(html);
        }

        String created_time = (String) data.get("created_time");// creation time (epoch seconds as string)
        created_time = created_time + "000";// seconds -> milliseconds
        JSONObject motor_profile_info = data.getJSONObject("motor_profile_info");
        String user_id = (String) motor_profile_info.get("user_id");// author id
        String name = (String) motor_profile_info.get("name");// author name
        String follows = authorMap.get(user_id);// author follower count
        // FIX: default missing follower counts to "0", consistent with the comment washers.
        if (follows == null){ follows = "0"; }
        // Temporarily clear the authorMap tag so it is not serialized into the data record.
        crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap",null);
        CrawlerData crawlerData1 = CrawlerData.builder()
                .data(crawlerRecord,page)
                .dataId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.article,article_id))
                .releaseTime(Long.valueOf(created_time))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                .addContentKV(AICCommonField.Field_Author,name)
                .addContentKV(AICCommonField.Field_Author_Id,user_id)
                .addContentKV(AutoVMCommonField.Field_Author_Follows,follows)
                .addContentKV(AICCommonField.Field_Images, String.valueOf(content))
                .addContentKV(AICCommonField.Field_Title,title)
                .addContentKV(AICCommonField.Field_Content,contents)
                .flowInPipelineTag("kafka")
                .build();
        // Restore the authorMap tag for subsequent washes.
        crawlerRecord.tagsCreator().bizTags().addCustomKV("authorMap",authorMap);
        crawlerData1.setFilterPipelineResult(true);
        crawlerData.add(crawlerData1);
    }

    /*
    * Resolve the raw "module" business tag into its display name and write it
    * back onto the record's business tags. Returns the same record for chaining.
    * */
    private CrawlerRequestRecord module(CrawlerRequestRecord crawlerRecord){
        String raw = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("module");
        String display;
        if (raw.equals("reply")) {
            display = "动态板块";
        } else if (raw.equals("selected")) {
            display = "精华板块";
        } else if (raw.equals("series_wenda_publish")) {
            display = "问答板块";
        } else {
            // Any other value maps to the review board.
            display = "评车板块";
        }
        crawlerRecord.tagsCreator().bizTags().addCustomKV("module",display);
        return crawlerRecord;
    }
    /*
    * Wash the list page: each request-header entry maps an item url to a
    * "releaseTime/typeCode" value; emit one article CrawlerData per entry
    * into the redis pipeline.
    * */
    private void washListPage(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerData) {
        Map<String, String> map = crawlerRecord.getHttpRequest().getHeaders();
        this.module(crawlerRecord);
        // The headers were only a carrier for list metadata; drop them from the request.
        crawlerRecord.getHttpRequest().setHeaders(null);
        for (Map.Entry<String, String> entry : map.entrySet()) {
            String url = entry.getKey();
            String[] meta = entry.getValue().split("/");
            String time = meta[0];
            String typeCode = meta[1];
            String type;
            switch (typeCode) {
                case "8":
                    type = "问答";
                    break;
                case "6":
                    type = "视频";
                    break;
                default:
                    type = "文章";
                    break;
            }
            String articleId = url.split("/")[5];// path segment used as the article id
            CrawlerData item = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, articleId))
                    .url(url)
                    .releaseTime(Long.valueOf(time))
                    .addContentKV("releaseTimeToLong", time)
                    .addContentKV("itemUrl", url)
                    .addContentKV(AutoVMCommonField.Tag_Field_Data_Type, type)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .flowInPipelineTag("redis")
                    .build();
            item.setFilterPipelineResult(true);
            crawlerData.add(item);
        }
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // No-op: this script does no post-execution work.
    }
    /*
     * Re-enqueue a request whose download failed, up to 5 retries. The retry
     * count is tracked in the RECORD_AGAIN_REQUEST business tag and appended to
     * the record key so each retry gets a distinct key.
     * */
    private void recordAgainRequest(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> linksRecords) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(RECORD_AGAIN_REQUEST)){// has this request been retried before?
            count = (int)crawlerBusinessTags.getCategoryTag().getKVTag(RECORD_AGAIN_REQUEST).getVal();
            if (count >= 5){
                log.error(DOMAIN + " download page the number of retries exceeds the limit,request url {}",crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }
        // FIX: increment the retry counter. The original stored the unchanged count back
        // into the tag, so the >= 5 limit was never reached and retries looped forever.
        count = count + 1;
        String url = crawlerRecord.getHttpRequest().getUrl();
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get(TURN_PAGE_ITEM_REQUEST);
        CrawlerRequestRecord crawlerRequestRecord;
        if (type == null){
            // Plain item request: copy biz/result tags and bypass the filter.
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .releaseTime(System.currentTimeMillis())
                    .httpUrl(url)
                    .recordKey(crawlerRecord.getRecordKey()+count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }else{
            // Turn-page request: copy biz/request tags instead.
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .releaseTime(System.currentTimeMillis())
                    .httpUrl(url)
                    .recordKey(crawlerRecord.getRecordKey()+count)
                    .copyBizTags()
                    .copyRequestTags()
                    .build();
        }
        // Carry the original request context over to the retry.
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(RECORD_AGAIN_REQUEST,count);
        linksRecords.add(crawlerRequestRecord);
    }

}
