package com.chance.cc.crawler.development.scripts.xcar.xcarSeriesNews;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.http.HttpClientDownloader;
import com.chance.cc.crawler.core.downloader.http.HttpClientGenerator;
import com.chance.cc.crawler.core.downloader.proxy.SiteConfigProxyProvider;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import jdk.nashorn.api.scripting.NashornScriptEngineFactory;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.script.*;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.text.ParseException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * @ClassName XcarNewsArticleClass
 * @Description TODO
 * @Author songding
 * @Date 2021/8/12 10:11
 * @Version 1.0
 * 爱卡汽车   车系文章板块采集
 * 30天文章链接累计  和 主贴 互动量  全量回复
 **/
public class XcarSeriesNewsCrawlerScript extends CrawlerCommonScript {
    private Logger log = LoggerFactory.getLogger(XcarSeriesNewsCrawlerScript.class);

    // Crawler identity: matched against record tags in domain()/crawlerCheck().
    private static final String DOMAIN = "xcar";
    private static final String SIDE = "SN";
    private static final String RECORD_AGAIN_REQUEST = "record_again_request"; // marks a record that must be re-downloaded
    private static final String TURN_PAGE_ITEM_REQUEST = "turn_page_item_request"; // marks a turn-page item for re-download
    private static final String KEYS = "keys"; // biz-tag key holding the list of series keywords
    private static final String DOMAIN_RESULT_JSON_RECORD_TAG = "domain_result_json"; // tag carrying the stored domain-url JSON for backfill


    // Literal URL fragments. NOTE(review): mixed-case names instead of UPPER_SNAKE_CASE;
    // left unchanged because they are referenced throughout the class.
    private static final String HTTPS = "https:";
    private static final String XcarUrl = "https://newcar.xcar.com.cn";
    private static final String xcarUrlNew = "https://newcar.xcar.com.cn/";

    // Regular expressions describing every URL shape this script requests or parses.
    private static final String Url = "https://newcar.xcar.com.cn/\\S*/"; // series page
    private static final String TurnPageUrl = "/\\S*/news\\S*.htm"; // news-list turn page (relative)
    private static final String NewsList = "https://newcar.xcar.com.cn/\\S*/news.htm"; // first news-list page
    private static final String NewsListTurnPageZero = "https://newcar.xcar.com.cn/\\S*/news_1\\S*.htm"; // subsequent list pages
    private static final String NewsListArticleOne = "https://info.xcar.com.cn/\\S*/news\\S*.html"; // article detail (info host)
    private static final String NewsListArticleTwo = "https://newcar.xcar.com.cn/\\S*/news\\S*.html"; // article detail (newcar host)
    private static final String NewsListArticleThree = "https://drive.xcar.com.cn/\\S*/news\\S*.html"; // article detail (drive host)
    private static final String MasterArticleOne = "https://newcar.xcar.com.cn/\\S*/news\\S*.html\\?viewtype=all#page_1"; // paginated-article url
    private static final String MasterArticleTwo = "https://info.xcar.com.cn/\\S*/news\\S*.html\\?viewtype=all#page_1"; // paginated-article url
    private static final String MasterArticleThree = "https://drive.xcar.com.cn/\\S*/news\\S*.html\\?viewtype=all#page_1"; // paginated-article url
    private static final String UserUrl = "https://my.xcar.com.cn/space.php\\?uid=\\S*"; // user profile page
    private static final String CommonUrl = "https://comment.xcar.com.cn/comment.php\\?nid=\\S*"; // comment page
    private static final String CommonUrlPageOne = "https://comment.xcar.com.cn/interface/index.php\\?iact=CommentLevel&" +
            "cid=\\S*&action=getNewsComment&sort=ups&ctype=0&page=\\S*&limit=25&_=\\S*"; // comment turn page (sorted by ups)
    private static final String ArticleCommonUrl = "https://comment.xcar.com.cn/comment.php\\?nid=\\S*&ctype=0"; // comment url for non-paginated articles
    private static final String CommonUrlPageTwo = "https://comment.xcar.com.cn/interface/index.php\\?" +
            "iact=CommentLevel&cid=\\S*&action=getNewsComment&sort=time&ctype=0&page=\\S*&limit=25&_=\\S*"; // comment turn page (sorted by time)

    private static final String MysqlUrl = "http://192.168.1.215:9599/crawler/domain/common/api/v1/\\S*"; // internal domain-url service

    // Compiled anti-bot cipher scripts keyed by cipher name ("md5"/"sha1"/"sha256");
    // lazily built by initCompileScript() when a 521 challenge is hit.
    public Map<String, CompiledScript> compiledScriptMap;
    public ScriptEngine scriptEngine;
    @Override
    public String domain() {
        // Domain key this script is registered under.
        return DOMAIN;
    }
    /**
     * Accepts only records whose site biz-tag equals {@code SIDE} ("SN").
     * BUG FIX: constant-first equals avoids an NPE when the site tag is absent.
     *
     * @param crawlerRecord incoming request record
     * @return true when this script should handle the record
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        return SIDE.equals(crawlerRecord.tagsCreator().bizTags().site());
    }
    /**
     * Registers every URL pattern this script may download or parse.
     * BUG FIX: the original registered "https://newcar.xcar.com.cn/" twice —
     * once as an inline literal and once via {@code xcarUrlNew} (the same string);
     * the duplicate registration is removed and the constant is used instead.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(MysqlUrl);
        addUrlRegular(Url);
        addUrlRegular(xcarUrlNew);
        addUrlRegular(NewsList);
        addUrlRegular(NewsListTurnPageZero);
        addUrlRegular(NewsListArticleOne);
        addUrlRegular(NewsListArticleTwo);
        addUrlRegular(NewsListArticleThree);
        addUrlRegular(MasterArticleOne);
        addUrlRegular(MasterArticleTwo);
        addUrlRegular(MasterArticleThree);
        addUrlRegular(UserUrl);
        addUrlRegular(CommonUrl);
        addUrlRegular(CommonUrlPageOne);
        addUrlRegular(CommonUrlPageTwo);
        addUrlRegular(ArticleCommonUrl);
    }
    /**
     * Expands an incoming request into the initial set of crawl records:
     * series pages derived from keyword support records, plus — for trace
     * (backfill) requests only — links replayed from the stored domain-url JSON.
     *
     * @param crawlerRequestRecord the triggering request
     * @param supportSourceRecords pre-downloaded support pages (keyword source)
     * @return records to enqueue; empty when nothing qualifies
     */
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> prepareLinks = new ArrayList<>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (url.contains("keys")) {
                this.initKeyWord(crawlerRequestRecord, supportSourceRecord);
                List<String> keys = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagListVal(KEYS);
                crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(KEYS, null); // clear the tag once consumed
                for (String key : keys) {
                    String seriesUrl = xcarUrlNew + key + "/"; // series page url
                    CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(seriesUrl)
                            .recordKey(seriesUrl)
                            .releaseTime(System.currentTimeMillis()) // was Long.valueOf(...): needless boxing
                            .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                            .copyResultTags()
                            .copyBizTags()
                            .build();
                    record.tagsCreator().bizTags().addKeywords(key);
                    prepareLinks.add(record);
                }
            }
        }
        // Backfill (trace) collection only. BUG FIX: constant-first equals — the
        // original NPE'd when the "trace" tag was absent.
        if (!"trace".equals(crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("trace"))) {
            log.info("不能进入回溯方法");
            return prepareLinks;
        }
        // Replay one stored link from the domain-url database.
        // NOTE(review): matches() treats xcarUrlNew as a regex ('.' matches any char);
        // equals() was presumably intended — kept as-is to preserve behavior.
        if (requestUrl.matches(xcarUrlNew)) {
            log.info("不能进入取出连接 " + crawlerRequestRecord.tagsCreator().bizTags().hasKVTag(DOMAIN_RESULT_JSON_RECORD_TAG));
            if (crawlerRequestRecord.tagsCreator().bizTags().hasKVTag(DOMAIN_RESULT_JSON_RECORD_TAG)) {
                KVTag domainResultJson = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(DOMAIN_RESULT_JSON_RECORD_TAG);
                CrawlerDomainUrls crawlerDomainUrls = JSON.parseObject(String.valueOf(domainResultJson.getVal()), CrawlerDomainUrls.class);
                Json urlJson = new Json(crawlerDomainUrls.getUrl());
                String itemUrl = urlJson.jsonPath($_type + ".url").get();
                long releaseTimeToLong = Long.parseLong(urlJson.jsonPath($_type + ".releaseTime").get());
                if (!isDateRange(crawlerRequestRecord, releaseTimeToLong)) {
                    return prepareLinks; // stored link falls outside the configured date window
                }
                crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().removeLabelTag(DOMAIN_RESULT_JSON_RECORD_TAG);
                CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                prepareLinks.add(record);
            }
        }
        return prepareLinks;
    }

    /**
     * Extracts the series keywords from an internally-downloaded support page and
     * stores them under the {@code KEYS} biz-tag of the triggering request.
     * Does nothing (logs an error) when the support download was not successful.
     */
    private void initKeyWord(CrawlerRequestRecord crawlerRequestRecord, CrawlerRequestRecord supportSourceRecord) {
        HttpPage supportPage = supportSourceRecord.getInternalDownloadPage();
        Json body = supportPage.getJson();
        String status = body.jsonPath($_type + ".msg").get();
        if (!"success".equals(status)) {
            log.error("keyword page [{}] download error!", supportPage.getRequest().getUrl());
            return;
        }
        List<String> keywordList = new ArrayList<>();
        for (String entry : body.jsonPath($_type + ".content").all()) {
            keywordList.add(JSONObject.parseObject(entry).getString("keyword"));
        }
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(KEYS, keywordList);
    }




    /**
     * Main dispatch: routes the downloaded page to the matching parser based on its URL.
     * Handles download failures first — 521 triggers the JS-challenge cookie refresh,
     * 404 is dropped, anything else is scheduled for re-download.
     *
     * @param crawlerRecord the record that was downloaded
     * @param page          the downloaded page (may have failed)
     * @return follow-up records to enqueue
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        if ((!page.isDownloadSuccess()) || page.getStatusCode() != 200) {
            // BUG FIX: parameterized SLF4J logging instead of string concatenation.
            log.error("download page !=200 or page ==null url={} status={}", url, page.getStatusCode());
            if (page.getStatusCode() == 521) {
                // 521: anti-bot JS challenge — compile the cipher scripts and refresh the cookie.
                initCompileScript();
                this.cookieUpdate(page, crawlerRecord, parseLinks);
            } else if (page.getStatusCode() != 404) {
                // Any other non-404 failure: schedule a re-download and stop parsing.
                this.recordAgainRequest(crawlerRecord, parseLinks);
                crawlerRecord.setNeedWashPage(false);
                return parseLinks;
            }
            // NOTE(review): on 521/404 control intentionally falls through and the failed
            // page is still matched against the URL patterns below — confirm this is intended.
            crawlerRecord.setNeedWashPage(false);
        }
        if (url.matches(Url)) { // series page
            this.getNewUrl(crawlerRecord, page, parseLinks);
        }
        if (url.matches(NewsList) || url.matches(NewsListTurnPageZero)) { // list page and its turn pages
            this.parseNewsListUrl(crawlerRecord, page, parseLinks);
        }
        // Article detail page (any of the three hosts).
        if (url.matches(NewsListArticleOne) || url.matches(NewsListArticleTwo) || url.matches(NewsListArticleThree)) {
            this.parseArticle(crawlerRecord, page, parseLinks);
        }
        // Paginated-article ("view all") url.
        if (url.matches(MasterArticleOne) || url.matches(MasterArticleTwo) || url.matches(MasterArticleThree)) {
            this.parserMasterArticle(crawlerRecord, page, parseLinks);
        }
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
            if (url.matches(CommonUrl)) {
                this.parserCommonPage(crawlerRecord, page, parseLinks); // comments of a paginated article
            }
            if (url.matches(CommonUrlPageOne) || url.matches(CommonUrlPageTwo)) {
                this.judgeCommon(crawlerRecord, page, parseLinks); // decide whether more comment pages exist
            }
        }
        return parseLinks;
    }

    /**
     * Second-stage cookie refresh: extracts the challenge payload from the page,
     * runs the cipher-specific compiled script ("go" function) to produce the final
     * __jsl_clearance_s cookie, and publishes it via updateLocalAuthInfo.
     */
    private void cookieTwoUpdate(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        KVTag cookie = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("cookieMap");
        Map<String, String> cookieMap = (Map<String, String>) cookie.getVal();
        Matcher mtAuthor = Pattern.compile("};go\\((.*?)\\)</script>").matcher(page.getRawText());
        if (!mtAuthor.find()) {
            return;
        }
        try {
            String cookieGen = mtAuthor.group(1);
            Json cookieGenJson = new Json(cookieGen);
            String ha = cookieGenJson.jsonPath($_type + ".ha").get(); // cipher name: md5/sha1/sha256
            log.info("xcar cookie cipher code {}", ha);
            compiledScriptMap.get(ha).eval();
            Invocable invocable = (Invocable) compiledScriptMap.get(ha).getEngine();
            String result = (String) invocable.invokeFunction("go", JSON.parseObject(cookieGen, Map.class));
            // Keep only the cookie value between '=' and ';'.
            result = result.substring(result.indexOf("=") + 1, result.indexOf(";"));
            cookieMap.put("__jsl_clearance_s", result);
            result = processCookie(cookieMap);
            updateLocalAuthInfo(result);
            log.info("xcar cookie second update result {}", result);
        } catch (ScriptException | NoSuchMethodException e) {
            // BUG FIX: was printStackTrace(); route through the logger with the stack trace.
            log.error("xcar second cookie update failed", e);
        }
    }

    /**
     * Solves the two-step JS challenge behind HTTP 521: evaluates the first inline
     * script to obtain a provisional __jsl_clearance_s cookie, re-requests the page
     * with it, then runs the cipher-specific compiled script to produce the final
     * cookie and publishes it via updateLocalAuthInfo.
     */
    private void cookieUpdate(HttpPage page, CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> parseLinks) {
        HttpClientDownloader downloader = new HttpClientDownloader();
        downloader.setClientGenerator(new HttpClientGenerator());
        downloader.setProxyProvider(new SiteConfigProxyProvider());
        HttpPage httpPage = downloader.download(crawlerRequestRecord.getHttpRequest(), crawlerRequestRecord.getHttpConfig());
        // The challenge page wraps its payload in a single function call: grab "(...)".
        String js = httpPage.getRawText().substring(httpPage.getRawText().indexOf("("), httpPage.getRawText().lastIndexOf(")") + 1);
        Map<String, String> cookieMap = new HashMap<>();
        try {
            // Step 1: provisional cookie from the first challenge script.
            String eval = (String) scriptEngine.eval(js);
            eval = eval.substring(eval.indexOf("=") + 1, eval.indexOf(";"));
            cookieMap.put("__jsl_clearance_s", eval);
            log.info("xcar cookie second update result __jsl_clearance_s={}", eval);
            crawlerRequestRecord.getHttpRequest().addHeader("cookie", processCookie(cookieMap));

            // Step 2: final encrypted cookie from the cipher-specific compiled script.
            httpPage = downloader.download(crawlerRequestRecord.getHttpRequest(), crawlerRequestRecord.getHttpConfig());
            Matcher mtAuthor = Pattern.compile("};go\\((.*?)\\)</script>").matcher(httpPage.getRawText());
            if (mtAuthor.find()) {
                String cookieGen = mtAuthor.group(1);
                Json cookieGenJson = new Json(cookieGen);
                String ha = cookieGenJson.jsonPath($_type + ".ha").get(); // cipher name: md5/sha1/sha256
                log.info("xcar cookie cipher code {}", ha);
                compiledScriptMap.get(ha).eval();
                Invocable invocable = (Invocable) compiledScriptMap.get(ha).getEngine();
                String result = (String) invocable.invokeFunction("go", JSON.parseObject(cookieGen, Map.class));
                result = result.substring(result.indexOf("=") + 1, result.indexOf(";"));
                cookieMap.put("__jsl_clearance_s", result);
                result = processCookie(cookieMap);
                updateLocalAuthInfo(result);
                log.info("xcar cookie second update result {}", result);
            }
        } catch (Exception e) {
            // BUG FIX: log the full exception — e.getMessage() alone hid the stack trace.
            log.error("xcar cookie update error", e);
        }
    }
    /**
     * Serializes a cookie map into a "k1=v1;k2=v2" header string (no trailing ';').
     * BUG FIXES: uses StringBuilder instead of the needlessly synchronized
     * StringBuffer, and no longer throws StringIndexOutOfBoundsException on an
     * empty map (the original substring(0, lastIndexOf(";")) did).
     *
     * @param cookieMap cookie name/value pairs; may be empty
     * @return the joined cookie string, or "" for an empty map
     */
    private String processCookie(Map<String, String> cookieMap) {
        StringBuilder cookieSB = new StringBuilder();
        for (Map.Entry<String, String> entry : cookieMap.entrySet()) {
            if (cookieSB.length() > 0) {
                cookieSB.append(";");
            }
            cookieSB.append(entry.getKey()).append("=").append(entry.getValue());
        }
        return cookieSB.toString();
    }
    /**
     * Lazily compiles the site's anti-bot cipher scripts (md5/sha1/sha256) with the
     * Nashorn engine and caches them in {@code compiledScriptMap}.
     * BUG FIXES: guards against a missing Nashorn engine (the original NPE'd),
     * closes the FileReader (try-with-resources; the original leaked it), and logs
     * failures instead of printStackTrace().
     */
    private void initCompileScript() {
        if (compiledScriptMap == null) {
            compiledScriptMap = new HashMap<>();
        }
        ScriptEngineManager sm = new ScriptEngineManager();
        NashornScriptEngineFactory factory = null;
        for (ScriptEngineFactory f : sm.getEngineFactories()) {
            if (f.getEngineName().equalsIgnoreCase("Oracle Nashorn")) {
                factory = (NashornScriptEngineFactory) f;
                break;
            }
        }
        if (factory == null) {
            log.error("Oracle Nashorn script engine not available; cannot compile xcar cipher scripts");
            return;
        }
        String[] stringArray = ArrayUtils.toArray("-doe", "--global-per-engine");
        scriptEngine = factory.getScriptEngine(stringArray);
        List<String> ciphers = Arrays.asList("md5", "sha1", "sha256");
        String filePathPrefix = "/data/chance_crawler_runner/domain/xcar/cipher_js/";
        for (String cipher : ciphers) {
            String filePath = filePathPrefix + cipher + ".js";
            log.info("xcar cipher js file path {}", filePath);
            // NOTE(review): FileReader uses the platform default charset — confirm the js files match it.
            try (FileReader reader = new FileReader(filePath)) {
                compiledScriptMap.put(cipher, ((Compilable) scriptEngine).compile(reader));
            } catch (ScriptException | IOException e) {
                log.error("failed to compile xcar cipher script {}", filePath, e);
            }
        }
    }
    /**
     * Parses one comment-list API response and, when the comments on it still fall
     * inside the configured date window, enqueues the next comment page.
     * BUG FIXES: null-safe string comparisons (the original NPE'd on a missing
     * "message"), a primitive long instead of a boxed Long, the exception attached
     * to the parse-failure log, removal of an unused local and a stray ';', and the
     * current year instead of the hard-coded "2021年" for year-less dates.
     */
    private void judgeCommon(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String info = page.getJson().jsonPath($_type + ".info").get();
        JSONObject infoJson = JSONObject.parseObject(info);
        String message = (String) infoJson.get("message");
        if ("获取失败".equals(message)) { // "fetch failed"
            crawlerRecord.setNeedWashPage(false);
            return;
        }
        String config = page.getJson().jsonPath($_type + ".config").get();
        JSONObject configJson = JSONObject.parseObject(config);
        String cup = (String) configJson.get("cup"); // current page index
        String count = (String) configJson.get("count"); // total comment count
        if ("247".equals(count)) { // site returns a fake fixed count when blocked
            log.error("Article comments cannot be displayed have Anti - crawler technology error 247 Don't parse turn page");
            return;
        }
        List<String> comments = page.getJson().jsonPath($_type + ".list").all();
        for (String s : comments) {
            JSONObject comment = JSONObject.parseObject(s);
            String pub_time = (String) comment.get("pub_time"); // comment publish time, relative or absolute
            long releaseTime = 0L;
            if (pub_time.contains("昨天")) { // "yesterday"
                releaseTime = System.currentTimeMillis() - 86400000L;
            }
            if (pub_time.contains("分钟前") || pub_time.contains("小时")) { // "minutes ago" / "hours"
                // NOTE(review): this also subtracts a full day, matching the original — confirm intended.
                releaseTime = System.currentTimeMillis() - 86400000L;
            }
            if (pub_time.contains("前天")) { // "day before yesterday"
                releaseTime = System.currentTimeMillis() - 172800000L;
            }
            try {
                if (releaseTime == 0L) {
                    if (pub_time.contains("年")) { // date carries an explicit year
                        releaseTime = DateUtils.parseDate(pub_time, "yyyy年MM月dd日").getTime();
                    } else {
                        // BUG FIX: the site omits the year for current-year dates; the
                        // original hard-coded "2021年". Use the current year instead.
                        int year = Calendar.getInstance().get(Calendar.YEAR);
                        releaseTime = DateUtils.parseDate(year + "年" + pub_time, "yyyy年MM月dd日").getTime();
                    }
                }
                // Any comment outside the window means later pages are older: stop paging.
                if (!this.isDateRange(crawlerRecord, releaseTime)) {
                    return;
                }
            } catch (ParseException e) {
                log.error("get article time fail", e);
            }
        }
        String news = page.getJson().jsonPath($_type + ".news").get();
        JSONObject newsJson = JSONObject.parseObject(news);
        String cid = (String) newsJson.get("cid");
        // NOTE(review): the stray "&amp&" is kept byte-for-byte from the original URL — verify the API tolerates it.
        String pageUrl = "https://comment.xcar.com.cn/interface/index.php?iact=CommentLevel&cid=" + cid + "&amp&action=getNewsComment&sort=time&ctype=0&page=" + (Integer.parseInt(cup) + 1) + "&limit=25&_=" + System.currentTimeMillis();
        KVTag filterInfoTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
        CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(pageUrl)
                .recordKey(pageUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                .notFilterRecord()
                .copyResultTags()
                .build();
        record.tagsCreator().bizTags().addCustomKV("repetition", pageUrl); // dedupe key for repeated urls
        record.setFilter(filterInfoRecord.getFilter());
        record.setFilterInfos(filterInfoRecord.getFilterInfos());
        record.setTurnPageFilterInfo(filterInfoRecord.getTurnPageFilterInfo());
        parseLinks.add(record);
    }

    /**
     * Checks whether a release timestamp falls inside the record's configured date
     * window. Records without a date-based filter always pass.
     * BUG FIXES: null-safe against a null {@code releaseTimeToLong} (the original
     * unboxed it and NPE'd) and guards {@code endTime} explicitly.
     *
     * @param crawlerRequestRecord record carrying the filter configuration
     * @param releaseTimeToLong    epoch millis to test; 0 or null never passes a date filter
     * @return true when the timestamp is inside the window or no date filter applies
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord, Long releaseTimeToLong) {
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        if (filter != CrawlerEnum.CrawlerRecordFilter.keyOrDateRange && filter != CrawlerEnum.CrawlerRecordFilter.dateRange) {
            return true; // no date filter configured
        }
        Long startTime = null;
        Long endTime = null;
        for (FilterInfo filterInfo : crawlerRequestRecord.getFilterInfos()) {
            if (filterInfo.getFilter() == CrawlerEnum.CrawlerRecordFilter.dateRange) {
                long[] dateAllowRange = filterInfo.getDateAllowRange();
                int hourFromNow = filterInfo.getHourFromNow();
                if (dateAllowRange != null) {
                    startTime = dateAllowRange[0];
                    endTime = dateAllowRange[1];
                } else if (hourFromNow != 0) {
                    endTime = System.currentTimeMillis() - 60000; // now minus one minute
                    startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
                }
            }
        }
        return startTime != null && endTime != null
                && releaseTimeToLong != null && releaseTimeToLong != 0
                && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime;
    }


    /**
     * From a comment.php page, derives the article/comment id and enqueues the first
     * comment-list API page (sorted by time), restoring the saved comment filter.
     * BUG FIX: constant-first equals — the original NPE'd when the #commentnum node
     * was missing from the page.
     */
    private void parserCommonPage(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String url = crawlerRecord.getHttpRequest().getUrl();
        // NOTE(review): fixed offsets assume the exact "comment.php?nid=..." URL shape — confirm.
        String uid = url.substring(44, url.length() - 8);
        String commons = page.getHtml().xpath("//*[@id=\"commentnum\"]//text()").get();
        if ("247".equals(commons)) { // site returns a fake fixed count when blocked
            log.error("Comments are fake data  size == 247");
        }
        String pageUrl = "https://comment.xcar.com.cn/interface/index.php?iact=CommentLevel&cid=" + uid +
                "&action=getNewsComment&sort=time&ctype=0&page=" + 1 + "&limit=25&_=" + System.currentTimeMillis();
        KVTag filterInfoTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
        CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(pageUrl)
                .recordKey(pageUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                .copyBizTags()
                .copyResultTags()
                .build();
        record.setFilter(filterInfoRecord.getFilter());
        record.setFilterInfos(filterInfoRecord.getFilterInfos());
        record.setTurnPageFilterInfo(filterInfoRecord.getTurnPageFilterInfo());
        parseLinks.add(record);
    }

    /**
     * For a paginated ("viewtype=all") article, extracts the comment-page link from
     * the article body and enqueues it with the saved comment filter restored.
     * Does nothing when the record does not target comment data.
     */
    private void parserMasterArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        boolean wantsComments = crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.comment);
        if (!wantsComments) {
            return;
        }
        String href = page.getHtml().xpath("//div[@id=\"newsbody\"]/a[@target=\"_blank\"]/@href").get();
        String commentUrl = HTTPS + href;
        KVTag filterTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
        CrawlerRecord savedFilter = JSON.parseObject((String) filterTag.getVal(), CrawlerRecord.class);
        CrawlerRequestRecord next = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(commentUrl)
                .recordKey(commentUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .build();
        next.setFilter(savedFilter.getFilter());
        next.setFilterInfos(savedFilter.getFilterInfos());
        next.tagsCreator().bizTags().getCategoryTag().addKVTag("urls", crawlerRecord.getHttpRequest().getUrl());
        next.setTurnPageFilterInfo(savedFilter.getTurnPageFilterInfo());
        parseLinks.add(next);
    }
    // Parses an article detail page: extracts publish time and article id, then either
    // handles a single-page article inline or enqueues its "view all pages" variant.
    private void parseArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        // Publish time. NOTE(review): position-based XPath; substring below NPEs if the node is missing.
        String time = page.getHtml().xpath("/html/body/div[8]/div[1]/div[4]/div[1]/div/dl/dd/span/text()[1]").get();
        time = time.substring(0,10); // keep only the leading yyyy-MM-dd part
        Long releaseTime = 0l;
        try {
            releaseTime = DateUtils.parseDate(time, "yyyy-MM-dd").getTime();
        } catch (ParseException e) {
            log.error("Article time transcription error");
        }
        String url = crawlerRecord.getHttpRequest().getUrl();
        String[] split1 = url.split("/");
        // Article id from the 5th path segment; fixed-width prefix/suffix strip —
        // assumes a "news_<id>_1.html"-style segment, TODO confirm against real URLs.
        String articleId = split1[4];
        articleId = articleId.substring(5,articleId.length()-7);
        crawlerRecord.tagsCreator().bizTags().addCustomKV("articleId",articleId);

        // Link to the "view all pages" variant of the article, if any.
        String articleUrl = page.getHtml().xpath("/html/body/div[8]/div[1]/div[4]/div[5]/div/a/@href").get();
        if (articleUrl == null){ // article has no pagination
            this.parseArticleNoPage(crawlerRecord,page,parseLinks);
        }else{
            // Internal download to fetch the author's follower count (disabled):
           //this.authorInternalDownload(crawlerRecord,page,parseLinks);
            // Internal download to fetch brand info when not yet collected; the main
            // article content is produced by the follow-up record enqueued below.
            KVTag carMessage = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("carMessage");
            if (carMessage == null){
                this.brandUrl(crawlerRecord,page,parseLinks);
            }

            String url1 = crawlerRecord.getHttpRequest().getUrl();
            String[] split = url1.split("/");
            String dataId = split[4];
            articleUrl = HTTPS + articleUrl;
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(articleUrl)
                    .recordKey(articleUrl)
                    .releaseTime(releaseTime)
                    .copyBizTags()
                    .needWashed(true)
                    .copyResultTags()
                    .build();
            record.tagsCreator().bizTags().addCustomKV("dataId",dataId);
            record.tagsCreator().bizTags().addCustomKV("urls",articleUrl);
            // This record no longer yields the article itself: drop its label and skip washing.
            crawlerRecord.tagsCreator().bizTags().getCategoryTag().removeLabelTag(String.valueOf(CrawlerEnum.CrawlerDataType.article));
            crawlerRecord.setNeedWashPage(false);
            parseLinks.add(record);
        }
    }
    /**
     * Enqueues an internal download of the brand-info page linked from the article.
     * BUG FIX: the original evaluated the fallback XPath but discarded its result,
     * leaving {@code url} null and producing a bogus "https:null" request; the
     * fallback is now assigned, and a both-null case is logged and skipped.
     */
    private void brandUrl(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String url = page.getHtml().xpath("/html/body/div[7]/div/ul/li[1]/a/@href").get();
        if (url == null) {
            url = page.getHtml().xpath("/html/body/div[6]/div/ul/li[1]/a/@href").get();
        }
        if (url == null) {
            log.error("brand url not found on page [{}]", crawlerRecord.getHttpRequest().getUrl());
            return;
        }
        url = HTTPS + url;
        CrawlerRequestRecord authorRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(url)
                .recordKey(url)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .copyResultTags()
                .copyBizTags()
                .build();
        parseLinks.add(authorRecord);
    }


    /**
     * Handles a single-page article: optionally triggers the brand-info internal
     * download, derives the article id from the URL, and enqueues the first
     * comment-list API page.
     */
    private void parseArticleNoPage(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        // Internal download for the author's follower count is intentionally disabled:
        //this.authorInternalDownload(crawlerRecord,page,parseLinks);
        if (crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("carMessage") == null) {
            // Brand information not collected yet — enqueue the internal download.
            this.brandUrl(crawlerRecord, page, parseLinks);
        }
        String articleUrl = crawlerRecord.getHttpRequest().getUrl();
        String[] segments = articleUrl.split("/");
        String rawId = segments[4];
        // Fixed-width prefix/suffix strip — assumes a "news_<id>_1.html"-style segment, TODO confirm.
        String articleId = rawId.substring(5, rawId.length() - 7);
        // First comment-list API page for this article.
        String commonUrl = "https://comment.xcar.com.cn/interface/index.php?" +
                "iact=CommentLevel&cid=" + articleId + "&action=getNewsComment&sort=time&ctype=0&page=1&limit=25&_=" + System.currentTimeMillis();
        CrawlerRequestRecord next = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(commonUrl)
                .recordKey(commonUrl)
                .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        next.tagsCreator().bizTags().getCategoryTag().addKVTag("urls", crawlerRecord.getHttpRequest().getUrl());
        parseLinks.add(next);
    }

    /**
     * Turns a series page URL into its news-list URL ("<series>/news.htm") and
     * enqueues it; the list page itself is not washed.
     */
    private void getNewUrl(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String listUrl = crawlerRecord.getHttpRequest().getUrl() + "news.htm";
        CrawlerRequestRecord next = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .releaseTime(System.currentTimeMillis())
                .httpUrl(listUrl)
                .recordKey(listUrl)
                .copyBizTags()
                .copyResultTags()
                .build();
        next.setNeedWashPage(false);
        parseLinks.add(next);
    }

    //解析明细页链接
    private void parseNewsListUrl(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String url = crawlerRecord.getHttpRequest().getUrl();
        // First list page (https://newcar.xcar.com.cn/<id>/news.htm): schedule page 2.
        if (url.matches(NewsList)) {
            String text = page.getHtml().xpath("/html/body/div[7]/div[1]/div/div[2]/div[1]/div//text()").get();
            // Constant-first equals avoids an NPE when the "no data" node is missing.
            if ("暂无数据".equals(text)) {
                crawlerRecord.setNeedWashPage(false);
                return;
            }
            // Strip the trailing ".htm" and append "_1_2.htm" -> page 2 URL.
            String nextUrl = url.substring(0, url.length() - 4) + "_1_2.htm";
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(nextUrl)
                    .recordKey(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyResultTags()
                    .copyBizTags()
                    .build();
            parseLinks.add(record);
        }
        // Page 2 and later (news_1_N.htm): schedule page N+1.
        if (url.matches(NewsListTurnPageZero)) {
            String text = page.getHtml().xpath("/html/body/div[7]/div[1]/div/div[2]/div[1]/div//text()").get();
            if ("暂无数据".equals(text)) {
                crawlerRecord.setNeedWashPage(false);
                return;
            }
            // Extract the current page number from ".../news_1_<N>.htm".
            String[] split = url.split("https://newcar.xcar.com.cn/\\S*/news_1_");
            String sIndex = split[1].substring(0, split[1].length() - 4);
            int index = Integer.parseInt(sIndex) + 1;
            // When the next page is > 10 the current number has two digits, so strip one more char.
            if (index > 10) {
                url = url.substring(0, url.length() - 6) + index + ".htm";
            } else {
                url = url.substring(0, url.length() - 5) + index + ".htm";
            }
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(url)
                    .recordKey(url)
                    .releaseTime(System.currentTimeMillis())
                    .copyResultTags()
                    .copyBizTags()
                    .build();
            parseLinks.add(record);
        }
        // Collect article detail-page links from the current list page.
        List<Selectable> nodes = page.getHtml().xpath("/html/body/div[7]/div[1]/div/div[2]/div[1]/dl").nodes();
        // Always empty in the original implementation; kept so the downstream "List" tag stays present.
        List<String> list = new ArrayList<>();
        for (Selectable node : nodes) {
            String pageUrl = HTTPS + node.xpath("./dt/a/@href").get();
            String time = node.xpath("./dd/p[2]/span[1]//text()").get();
            long releaseTime = 0L;
            try {
                releaseTime = DateUtils.parseDate(time, "yyyy-MM-dd").getTime();
            } catch (ParseException e) {
                log.error("list Url time parse error");
            }
            // Skip articles outside the configured date range.
            if (!this.isDateRange(crawlerRecord, releaseTime)) {
                continue;
            }
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(pageUrl)
                    .recordKey(pageUrl)
                    .releaseTime(releaseTime)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .copyResultTags()
                    .copyBizTags()
                    .build();
            // Marks records produced by the daily list crawl (read back in washPage).
            record.tagsCreator().bizTags().addCustomKV("everyDay", "everyDay");
            record.tagsCreator().bizTags().addCustomKV("releaseTime", releaseTime);
            parseLinks.add(record);
        }
        crawlerRecord.setNeedWashPage(true);
        crawlerRecord.tagsCreator().bizTags().addCustomKV("List", list);
    }

    private void authorInternalDownload(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks){
        // Extract the author profile link and schedule an internal download
        // so the follower count can be read later.
        String authorHref = page.getHtml().xpath("/html/body/div[8]/div[1]/div[4]/div[1]/div/dl/dd/a/@href").get();
        String fullAuthorUrl = HTTPS + authorHref;
        CrawlerRequestRecord internalRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(fullAuthorUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .copyResultTags()
                .copyBizTags()
                .build();
        // The profile page needs a logged-in session, so attach the site cookies.
        this.addCookies(internalRecord);
        parseLinks.add(internalRecord);
    }

    /**
     * Dispatches the downloaded page to the matching wash routine based on the
     * record's result data types and the request URL pattern.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerData = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        String everyDay = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("everyDay");
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.article)){
            // Daily link accumulation. Bugfix: constant-first equals — everyDay is null
            // for records that never carried the "everyDay" tag, and .equals on it NPEd.
            if ((url.matches(NewsListArticleOne) || url.matches(NewsListArticleTwo) || url.matches(NewsListArticleThree)
                    || url.matches(MasterArticleOne) || url.matches(MasterArticleTwo) || url.matches(MasterArticleThree))
                    && "everyDay".equals(everyDay)) {
                this.washCountUrl(crawlerRecord, page, crawlerData);
            }
            // Articles that have pagination.
            if (url.matches(MasterArticleOne) || url.matches(MasterArticleTwo) || url.matches(MasterArticleThree)){
                this.washPageArticle(crawlerRecord, page, crawlerData);
            }
            // Articles without pagination.
            if (url.matches(NewsListArticleOne) || url.matches(NewsListArticleTwo) || url.matches(NewsListArticleThree)){
                this.washArticle(crawlerRecord, page, crawlerData);
            }
        }
        // Article-page interaction counts.
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.interaction)){
            if (url.matches(NewsListArticleOne) || url.matches(NewsListArticleTwo) || url.matches(NewsListArticleThree)){
                this.washInteraction(crawlerRecord, page, crawlerData);
            }
        }
        // Paginated article comments.
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.comment)){
            if (url.matches(CommonUrlPageOne) || url.matches(CommonUrlPageTwo)){
                this.washPageCommont(crawlerRecord, page, crawlerData);
            }
        }
        return crawlerData;
    }
    //清洗文章评论
    /**
     * Washes the comment API response for one article page: emits a comment record
     * and an interaction (likes) record per top-level comment and per nested reply.
     */
    private void washPageCommont(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerData) {
        String url = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("urls");
        String articleId = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("articleId");
        String config = page.getJson().jsonPath($_type + ".config").get();
        JSONObject configJson = JSONObject.parseObject(config);
        String count = (String) configJson.get("count");
        // "247" is the site's anti-crawler sentinel value: comments are hidden,
        // nothing to wash. Constant-first equals also guards a null count.
        if ("247".equals(count)) {
            log.error("Article comments cannot be displayed have Anti - crawler technology error 247  Don't wash comment");
            return;
        }
        String info = page.getJson().jsonPath($_type + ".info").get();
        JSONObject infoJson = JSONObject.parseObject(info);
        String message = (String) infoJson.get("message");
        if ("获取失败".equals(message)) {
            crawlerRecord.setNeedWashPage(false);
            return;
        }
        List<String> all = page.getJson().jsonPath($_type + ".list").all();
        for (String s : all) {
            JSONObject comment = JSONObject.parseObject(s);
            String dataId = (String) comment.get("id");//comment id
            String user_id = (String) comment.get("user_id");
            String user_name = (String) comment.get("user_name");
            String pub_time = (String) comment.get("pub_time");
            long releaseTime = resolvePubTime(pub_time);
            String img_path = (String) comment.get("img_path");//comment images

            // Nested replies under this comment.
            JSONObject child = comment.getJSONObject("child");
            if (child != null) {
                JSONArray childList = child.getJSONArray("list");
                for (Object obj : childList) {
                    JSONObject reply = JSONObject.parseObject((String) obj);
                    String id = (String) reply.get("id");
                    String user_id1 = (String) reply.get("user_id");
                    String user_name1 = (String) reply.get("user_name");
                    String pub_time1 = (String) reply.get("pub_time");
                    // Bugfix: the old code's relative-time branches overwrote the parent's
                    // releaseTime and tested the parent's pub_time, leaving releaseTime1
                    // negative or unset. Resolve the reply's own timestamp instead.
                    long releaseTime1 = resolvePubTime(pub_time1);
                    String conts1 = (String) reply.get("conts");
                    String ups1 = (String) reply.get("ups");
                    String img_path1 = (String) reply.get("img_path");
                    CrawlerData replyInteraction = CrawlerData.builder()
                            .data(crawlerRecord, page)
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), id))
                            .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment.enumVal(), id))
                            .url(url)
                            // NOTE(review): uses the parent comment's time, as the original did —
                            // confirm whether the reply's own time (releaseTime1) was intended.
                            .releaseTime(releaseTime)
                            .addContentKV(AICCommonField.Field_I_Likes, ups1)
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                            .flowInPipelineTag("kafka")
                            .build();
                    replyInteraction.setFilterPipelineResult(true);
                    crawlerData.add(replyInteraction);
                    // Replies outside the configured date range emit no comment record.
                    if (!isDateRange(crawlerRecord, releaseTime1)) {
                        continue;
                    }
                    CrawlerData replyComment = CrawlerData.builder()
                            .data(crawlerRecord, page)
                            .releaseTime(releaseTime1)
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment.enumVal(), id))
                            .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleId))
                            .url(url)
                            .addContentKV(AICCommonField.Field_Author, user_name1)
                            .addContentKV(AICCommonField.Field_Author_Id, user_id1)
                            .addContentKV(AICCommonField.Field_Content, conts1)
                            .addContentKV(AICCommonField.Field_Images, img_path1)
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                            .flowInPipelineTag("kafka")
                            .build();
                    replyComment.setFilterPipelineResult(true);
                    crawlerData.add(replyComment);
                }
            }

            String ups = (String) comment.get("ups");//likes
            String conts = (String) comment.get("conts");//comment body
            CrawlerData commentInteraction = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), dataId))
                    .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment.enumVal(), dataId))
                    .url(url)
                    .releaseTime(releaseTime)
                    .addContentKV(AICCommonField.Field_I_Likes, ups)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                    .flowInPipelineTag("kafka")
                    .build();
            commentInteraction.setFilterPipelineResult(true);
            // The list is requested with sort=time, so the first out-of-range comment
            // ends processing of the whole page (original behavior preserved).
            if (!isDateRange(crawlerRecord, releaseTime)) {
                return;
            }
            CrawlerData commentData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .releaseTime(releaseTime)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment.enumVal(), dataId))
                    .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleId))
                    .url(url)
                    .addContentKV(AICCommonField.Field_Author, user_name)
                    .addContentKV(AICCommonField.Field_Author_Id, user_id)
                    .addContentKV(AICCommonField.Field_Content, conts)
                    .addContentKV(AICCommonField.Field_Images, img_path)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                    .flowInPipelineTag("kafka")
                    .build();
            commentData.setFilterPipelineResult(true);
            crawlerData.add(commentData);
            crawlerData.add(commentInteraction);
        }
    }

    /**
     * Resolves an xcar comment timestamp string to epoch millis.
     * Handles the site's relative forms ("昨天", "x分钟前", "x小时前", "前天") and the
     * absolute forms "yyyy年MM月dd日" / "MM月dd日" (the latter assumed to be year 2021,
     * matching the original implementation). Returns 0 when parsing fails.
     */
    private long resolvePubTime(String pubTime) {
        long now = System.currentTimeMillis();
        if (pubTime.contains("昨天")) {
            return now - 86400000L;
        }
        if (pubTime.contains("分钟前") || pubTime.contains("小时")) {
            return now - 86460000L;
        }
        if (pubTime.contains("前天")) {
            return now - 172800000L;
        }
        try {
            if (pubTime.contains("年")) {
                return DateUtils.parseDate(pubTime, "yyyy年MM月dd日").getTime();
            }
            return DateUtils.parseDate("2021年" + pubTime, "yyyy年MM月dd日").getTime();
        } catch (ParseException e) {
            log.error("get article time fail");
            return 0L;
        }
    }
    //解析存在分页的文章url
    private void washPageArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerData) {
        String url = crawlerRecord.getHttpRequest().getUrl();
        // Publish time node, e.g. "[2021-08-12 ...]"; drop spaces, then chars 1..10 are yyyy-MM-dd.
        String time = page.getHtml().xpath("//*[@id=\"pubtime_baidu\"]/text()").get().replaceAll(" ", "");
        time = time.substring(1, 11);
        long releaseTime = 0L;
        try {
            releaseTime = DateUtils.parseDate(time, "yyyy-MM-dd").getTime();
        } catch (ParseException e) {
            log.error("get article time fail");
        }
        String title = page.getHtml().xpath("//*[@id=\"class\"]/div[5]/div[1]/div[1]/text()").get();
        String authorName = page.getHtml().xpath("//*[@id=\"author_baidu\"]/a/text()").get();
        // Author id is the uid query parameter of the profile link.
        String authorId = page.getHtml().xpath("//*[@id=\"author_baidu\"]/a/@href").get();
        authorId = authorId.split("//my.xcar.com.cn/space.php\\?uid=")[1];
        // Article body: concatenate the text of every paragraph.
        // (Removed the unused "follows" tag read and the unused <strong> extraction.)
        List<Selectable> nodes = page.getHtml().xpath("//*[@id=\"newsbody\"]/p").nodes();
        StringBuilder content = new StringBuilder();
        for (Selectable node : nodes) {
            String s = node.xpath(".//text()").get();
            content.append(s == null ? "" : s);
        }
        // All image URLs, joined with the literal "\x01" separator.
        StringBuilder images = new StringBuilder();
        for (String img : page.getHtml().xpath("//*[@id=\"newsbody\"]//img/@src").all()) {
            images.append(HTTPS).append(img).append("\\x01");
        }
        String articleId = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("articleId");
        CrawlerData articleData = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleId))
                .url(url)
                .releaseTime(releaseTime)
                .addContentKV(AICCommonField.Field_Title, title)
                .addContentKV(AICCommonField.Field_Author, authorName)
                .addContentKV(AICCommonField.Field_Author_Id, authorId)
                .addContentKV(AICCommonField.Field_Content, content.toString())
                .addContentKV(AICCommonField.Field_Images, images.toString())
                .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                .flowInPipelineTag("kafka")
                .build();
        articleData.setFilterPipelineResult(true);
        crawlerData.add(articleData);
    }

    //解析文章
    private void washArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerData) {
        String url = crawlerRecord.getHttpRequest().getUrl();
        // Publish time; first 10 chars are yyyy-MM-dd.
        String time = page.getHtml().xpath("/html/body/div[8]/div[1]/div[@class=\"left_border_box\"]/div[1]/div[@class=\"personinfo\"]/dl/dd/span/text()").get();
        long releaseTime = 0L;
        time = time.substring(0, 10);
        try {
            releaseTime = DateUtils.parseDate(time, "yyyy-MM-dd").getTime();
        } catch (ParseException e) {
            log.error("Article time transcription error");
        }
        String authorName = page.getHtml().xpath("/html/body/div[8]/div[1]/div[4]/div[1]/div/dl/dd/a/text()").get();
        // Author id is the raw profile href here (not the numeric uid) — kept as-is.
        String authorId = page.getHtml().xpath("/html/body/div[8]/div[1]/div[4]/div[1]/div/dl/dd/a/@href").get();
        String title = page.getHtml().xpath("/html/body/div[8]/div[1]/div[3]/div/h1/text()").get();
        // Article body: concatenate the text of every paragraph.
        List<Selectable> nodes = page.getHtml().xpath("//*[@id=\"newsbody\"]/p").nodes();
        StringBuilder content = new StringBuilder();
        for (Selectable node : nodes) {
            String s = node.xpath(".//text()").get();
            content.append(s == null ? "" : s);
        }
        // All image URLs, joined with the literal "\x01" separator.
        StringBuilder images = new StringBuilder();
        for (String img : page.getHtml().xpath("/html/body/div[8]/div[1]/div[4]//img/@src").all()) {
            images.append(HTTPS).append(img).append("\\x01");
        }
        String articleId = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("articleId");
        KVTag carMessage = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("carMessage");
        // Bugfix: default to an empty map so a missing "carMessage" tag no longer NPEs
        // in the val.get(...) calls below (fields simply come out null).
        Map<String, String> val = Collections.emptyMap();
        if (carMessage != null) {
            val = (Map<String, String>) carMessage.getVal();
        }
        CrawlerData articleData = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleId))
                .url(url)
                .releaseTime(releaseTime)
                .addContentKV(AICCommonField.Field_Title, title)
                .addContentKV(AICCommonField.Field_Author, authorName)
                .addContentKV(AICCommonField.Field_Author_Id, authorId)
                .addContentKV(AICCommonField.Field_Content, content.toString())
                .addContentKV(AICCommonField.Field_Images, images.toString())
                // Bugfix: afterInternalDownload stores this key as "brandName";
                // the old "brand_name" lookup was always null.
                .addContentKV(AutoVMCommonField.Field_Brand_name, val.get("brandName"))
                .addContentKV(AutoVMCommonField.Field_Brand_url, val.get("brandUrl"))
                .addContentKV(AutoVMCommonField.Field_Brand_id, val.get("brandId"))
                .addContentKV(AutoVMCommonField.Field_Series_name, val.get("seriesName"))
                .addContentKV(AutoVMCommonField.Field_Series_url, val.get("seriesUrl"))
                .addContentKV(AutoVMCommonField.Field_Series_id, val.get("seriesId"))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                .flowInPipelineTag("kafka")
                .build();
        articleData.setFilterPipelineResult(true);
        crawlerData.add(articleData);
    }

    //清洗文章互动量
    private void washInteraction(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerData) {
        // Comment count shown on the article page.
        String commentCount = page.getHtml().xpath("//*[@id=\"commentnum\"]/i/text()").get();
        // Publish time; first 10 chars are yyyy-MM-dd.
        String publishDate = page.getHtml().xpath("/html/body/div[8]/div[1]/div[4]/div[1]/div/dl/dd/span/text()[1]").get();
        publishDate = publishDate.substring(0, 10);
        long publishMillis = 0L;
        try {
            publishMillis = DateUtils.parseDate(publishDate, "yyyy-MM-dd").getTime();
        } catch (ParseException e) {
            log.error("Article time transcription error");
        }
        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // The 5th path segment of the URL is the article id.
        String articleKey = requestUrl.split("/")[4];
        CrawlerData interaction = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), articleKey))
                .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .url(requestUrl)
                .releaseTime(publishMillis)
                .addContentKV(AICCommonField.Field_I_Comments, commentCount)
                .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                .flowInPipelineTag("kafka")
                .build();
        interaction.setFilterPipelineResult(true);
        crawlerData.add(interaction);
    }

    //明细页累计
    // Emits an accumulation record (to redis) for every article detail link seen,
    // carrying the url, release time and the brand/series info gathered earlier.
    private void washCountUrl(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerData) {
        // Brand/series map written by afterInternalDownload; may be absent, in which
        // case the "brand" tag below is set to null (original behavior).
        KVTag carMessage = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("carMessage");
        Map<String,String> map = null;
        if (carMessage != null){
            map = (Map<String, String>) carMessage.getVal();
        }
        // Release time stored as a string tag by parseNewsListUrl.
        String releaseTime = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("releaseTime");
        String url = crawlerRecord.getHttpRequest().getUrl();
        String[] split = url.split("/");
        // dataId = last URL segment without its 5-char extension (".html" — TODO confirm).
        String dataId = split[split.length-1].substring(0,split[split.length-1].length()-5);
        CrawlerData crawlerData1 = CrawlerData.builder()
                .data(crawlerRecord,page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), dataId))
                .releaseTime(Long.valueOf(releaseTime))
                .url(url)
                .addContentKV("url",url)
                .addContentKV(AICCommonField.Tag_Site_Info,"车系页文章")
                .addContentKV("releaseTime",releaseTime)
                .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                // Accumulation records go to redis, not kafka.
                .flowInPipelineTag("redis")
                .build();
        crawlerData1.tagsCreator().bizTags().addCustomKV("brand",map);
        crawlerData1.setFilterPipelineResult(true);
        crawlerData.add(crawlerData1);

    }

    /**
     * Post-processes internal-download results:
     * author profile pages yield the follower count ("follows" biz tag);
     * article pages yield the brand / sub-brand / series breadcrumb ("carMessage" biz tag).
     */
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links){
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            String url = internalDownloadRecord.getHttpRequest().getUrl();
            if (url.matches(UserUrl)) {
                // Author profile page: extract the follower count.
                Html html = internalDownloadRecord.getInternalDownloadPage().getHtml();
                String follows = html.xpath("/html/body/div[2]/div[1]/div/div[1]/div/div[1]/ul/li[2]/a/strong/text()").get();
                crawlerRecord.tagsCreator().bizTags().addCustomKV("follows", follows);
            }
            if (url.matches(Url)){
                // Article page: extract brand / sub-brand / series from the breadcrumb.
                if (internalDownloadRecord.getInternalDownloadPage().getStatusCode() != 200 || internalDownloadRecord.getInternalDownloadPage().getHtml() == null){
                    log.error(" brand name page download fail  or page ==null");
                    // Bugfix: was "return", which silently dropped every remaining
                    // internal-download record on a single failed page; skip only this one.
                    continue;
                }
                Html html = internalDownloadRecord.getInternalDownloadPage().getHtml();
                // Breadcrumb anchors: a[1]=portal, a[2]=brand, a[3]=sub-brand, a[4]=series.
                String carName = html.xpath("/html/body/div[@class=\"place_nv\"]/div[@class=\"place\"]/a[1]/text()").get();
                String carUrl = HTTPS + html.xpath("/html/body/div[@class=\"place_nv\"]/div[@class=\"place\"]/a/@href").get();
                String brandName = html.xpath("/html/body/div[@class=\"place_nv\"]/div[@class=\"place\"]/a[2]/text()").get();
                String brandUrl = html.xpath("/html/body/div[@class=\"place_nv\"]/div[@class=\"place\"]/a[2]/@href").get();
                // Ids are taken from fixed path-segment positions of the relative hrefs —
                // TODO confirm these positions against the live markup.
                String brandId = brandUrl.split("/")[2];
                brandUrl = HTTPS + brandUrl;
                String childBrandName = html.xpath("/html/body/div[@class=\"place_nv\"]/div[@class=\"place\"]/a[3]/text()").get();
                String childBrandUrl = html.xpath("/html/body/div[@class=\"place_nv\"]/div[@class=\"place\"]/a[3]/@href").get();
                String childBrandId = childBrandUrl.split("/")[3];
                childBrandUrl = HTTPS + childBrandUrl;
                String seriesName = html.xpath("/html/body/div[@class=\"place_nv\"]/div[@class=\"place\"]/a[4]/text()").get();
                String seriesUrl = html.xpath("/html/body/div[@class=\"place_nv\"]/div[@class=\"place\"]/a[4]/@href").get();
                String seriesId = seriesUrl.split("/")[1];
                seriesUrl = "https://newcar.xcar.com.cn/" + seriesId;
                Map<String, String> map = new HashMap<>();
                map.put("carName", carName);
                map.put("carUrl", carUrl);
                map.put("brandName", brandName);
                map.put("brandUrl", brandUrl);
                map.put("brandId", brandId);
                map.put("childBrandName", childBrandName);
                map.put("childBrandUrl", childBrandUrl);
                map.put("childBrandId", childBrandId);
                map.put("seriesName", seriesName);
                map.put("seriesUrl", seriesUrl);
                map.put("seriesId", seriesId);
                crawlerRecord.tagsCreator().bizTags().addCustomKV("carMessage", map);
            }
        }
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // Intentionally a no-op: this script performs no per-run cleanup.
    }


    /**
     * Attaches a fixed set of logged-in xcar.com.cn session cookies to the request
     * so the internally-downloaded author page renders the follower count.
     * NOTE(review): these are hard-coded account/session tokens; they will expire
     * and should be moved to configuration — confirm with the team.
     */
    private void addCookies(CrawlerRequestRecord authorRecord) {
        authorRecord.getHttpRequest().setResponseCharset("UTF-8");
        Map<String,String> cookies = new HashMap<>();
        cookies.put("YD00788855712789%3AWM_TID","G1UtOd1WcttFFQBQVUN6nlvBF9t%2FOIiw");
        cookies.put("_Xdwnewuv","1");
        cookies.put("_Xdwuv","6288360682605");
        cookies.put("__isshowad","no");
        cookies.put("__jsluid_s","d9e9babfcf901b9f7e386faa222ab083");
        cookies.put("_discuz_pw","f057fda763adad2a9d987d3a9079538e");
        cookies.put("_discuz_uid","19093538");
        cookies.put("_discuz_vip","5");
        cookies.put("_xcar_name","xuser19193538");
        cookies.put("_xcar_name_utf8","xuser19193538");
        cookies.put("bbs_auth","BEjrtFMsr0dcZCJzusMUswoytXnbuhi%2BNXfTs8CMUnlRizGps7lX%2B4bRKkumY1lBDDQ");
        cookies.put("bbs_cookietime","31536000");
        cookies.put("fw_clc","1%3A1628836045%3B1%3A1628836050%3B1%3A1628836051%3B1%3A1628836052%3B1%3A1628836053");
        cookies.put("isReportedZhuge","1");
        cookies.put("nguv","c_16288360672480271708317218342411075");
        authorRecord.getHttpRequest().setCookies(cookies);
    }

    /**
     * Rebuilds the request for a retry after a failed download.
     * The attempt count lives in the RECORD_AGAIN_REQUEST biz tag; after 10
     * attempts the record is abandoned.
     */
    private void recordAgainRequest(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> linksRecords) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 1;
        if (crawlerBusinessTags.hasKVTag(RECORD_AGAIN_REQUEST)){
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(RECORD_AGAIN_REQUEST).getVal();
            if (count >= 10){
                log.error(DOMAIN + " download page the number of retries exceeds the limit,request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
            // Bugfix: the old code never incremented the counter, so the retry limit
            // was unreachable (unbounded retries) and recordKey never changed.
            count = count + 1;
        }
        String url = crawlerRecord.getHttpRequest().getUrl();
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get(TURN_PAGE_ITEM_REQUEST);
        CrawlerRequestRecord crawlerRequestRecord;
        if (type == null){
            // Not a turn-page request.
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .releaseTime(System.currentTimeMillis())
                    .httpUrl(url)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else if (url.matches(NewsListArticleOne) || url.matches(NewsListArticleTwo) || url.matches(NewsListArticleThree)
                || url.matches(MasterArticleOne) || url.matches(MasterArticleTwo) || url.matches(MasterArticleThree)){
            // Article pages (with or without pagination) keep the article result label.
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .releaseTime(System.currentTimeMillis())
                    .httpUrl(url)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyRequestTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .releaseTime(System.currentTimeMillis())
                    .httpUrl(url)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyRequestTags()
                    .build();
        }
        // Carry over request context and processing flags from the failed attempt.
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(RECORD_AGAIN_REQUEST, count);
        linksRecords.add(crawlerRequestRecord);
    }

}
