package com.chance.cc.crawler.development.scripts.ifeng.auto;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Crawler script for the ifeng.com auto channel: runs daily and collects
 * series articles published within the last seven days, their interaction
 * counts (likes / comment totals), and the newest replies from yesterday.
 *
 * @author ding
 * @date 2021/8/31 16:09
 * @version 1.0
 **/
public class IFengSeriesArticleCrawlerScript extends CrawlerCommonScript {
    private static Logger log = LoggerFactory.getLogger(IFengSeriesArticleCrawlerScript.class);
    // Identifiers returned by domain() and matched by crawlerCheck().
    private static final String domain = "ifeng";
    private static final String site = "SeriesArticle";
    // Biz-tag key tracking how many times a failed request has been re-queued.
    private static final String RECORD_AGAIN_REQUEST = "record_again_request";
    // Biz-tag key holding a serialized CrawlerRecord whose filter settings are
    // restored onto records before date-range filtering during washing.
    private static final String commentFilter = "comment_record_filter_info";
    // Entry URL prefix: series article list page, completed with a serialId/keyword.
    private static final String URL = "https://ncar.auto.ifeng.com/model/modelArticlePage?serialId=";

    // URL regex patterns recognized by this script (registered in initUrlRegulars).
    private static final String listPageUrl = "https://ncar.auto.ifeng.com/model/modelArticlePage\\?serialId=\\S*";
    private static final String listArticleUrl = "https://api.auto.ifeng.com/api/industryNewsInfo.do\\?serialId=\\S*&action=catNews&except=1022681";

    // Article page variants WITHOUT a comment section (types 1-11, 13).
    private static final String ArticleUrlType1 = "http://auto.ifeng.com/pinglun/\\S*";
    private static final String ArticleUrlType2 = "http://auto.ifeng.com/tujie/\\S*";
    private static final String ArticleUrlType3 = "http://auto.ifeng.com/fangtan/\\S*";
    private static final String ArticleUrlType4 = "http://auto.ifeng.com/xinwen/\\S*";
    private static final String ArticleUrlType5 = "http://auto.ifeng.com/daogou/\\S*";
    private static final String ArticleUrlType6 = "http://auto.ifeng.com/shijia/\\S*";
    private static final String ArticleUrlType7 = "http://auto.ifeng.com/haiwai/\\S*";
    private static final String ArticleUrlType8 = "http://auto.ifeng.com/guonei/\\S*";
    private static final String ArticleUrlType9 = "http://auto.ifeng.com/diezhao/\\S*";
    private static final String ArticleUrlType10 = "http://auto.ifeng.com/quanmeiti/\\S*";
    private static final String ArticleUrlType11 = "http://auto.ifeng.com/pic/\\S*";
    private static final String ArticleUrlType12 = "https://auto.ifeng.com/c/\\S*"; // article pages that carry a comment section
    private static final String ArticleUrlType13 = "http://auto.ifeng.com/news/\\S*";
    // APIs serving interaction counts and comment lists for type-12 articles.
    private static final String interactionUrlType = "https://comment.ifeng.com/get/count\\?format=json&docurl=\\S*&callback=getCommentCount";
    private static final String commentUrlType = "https://comment.ifeng.com/get.php\\?callback=hotCommentListCallBack&orderby=uptimes\\S*";

    // String.format templates used to build follow-up request URLs.
    private static final String updateListArticleUrl = "https://api.auto.ifeng.com/api/industryNewsInfo.do?serialId=%s&action=catNews&except=1022681";
    private static final String InteractionUrl = "https://comment.ifeng.com/get/count?format=json&docurl=%s&callback=getCommentCount";
    private static final String CommentUrl = "https://comment.ifeng.com/get.php?callback=hotCommentListCallBack&orderby=uptimes&docUrl=%s&format=json&job=1&p=1&pageSize=10&callback=hotCommentListCallBack&skey=%s";
    /**
     * Registers every URL pattern this script is allowed to handle,
     * in the same order the patterns are declared above.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {
                listPageUrl, listArticleUrl,
                ArticleUrlType1, ArticleUrlType2, ArticleUrlType3, ArticleUrlType4,
                ArticleUrlType5, ArticleUrlType6, ArticleUrlType7, ArticleUrlType8,
                ArticleUrlType9, ArticleUrlType10, ArticleUrlType11, ArticleUrlType12,
                ArticleUrlType13,
                interactionUrlType, commentUrlType
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Builds the initial series list-page requests from the supporting keyword
     * records: every keyword found in a successful "keys" support page becomes
     * one list-page request ({@code URL + keyword}).
     *
     * @param requestRecord        the triggering request record, used as the item-page parent
     * @param supportSourceRecords pre-downloaded support records; may be null
     * @return the list-page requests to enqueue (never null)
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> listRecord = new ArrayList<>();
        if (supportSourceRecords == null) {
            return listRecord;
        }
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (!url.contains("keys")) {
                continue;
            }
            HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
            Json json = internalDownloadPage.getJson();
            String msg = json.jsonPath($_type + ".msg").get();
            if (!"success".equals(msg)) {
                log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
                // Skip only this failed keyword page; the original returned here,
                // silently dropping all remaining support records.
                continue;
            }
            List<String> all = json.jsonPath($_type + ".content").all();
            for (String data : all) {
                JSONObject jsonObject = JSONObject.parseObject(data);
                String keyword = jsonObject.getString("keyword");
                String startUrl = URL + keyword;
                CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                        .itemPageRequest(requestRecord)
                        .httpUrl(startUrl)
                        .releaseTime(System.currentTimeMillis())
                        .needWashed(true)
                        .needParsed(true)
                        .copyResultTags()
                        .copyBizTags()
                        .build();
                listRecord.add(record);
            }
        }
        return listRecord;
    }

    /**
     * Extracts follow-up requests from a downloaded page based on its URL type:
     * list page -> list API, list API -> article pages, comment-enabled article
     * (type 12) -> interaction/comment API requests. Plain article pages are just
     * flagged for washing. Failed non-404 downloads are re-queued via
     * {@link #recordAgainRequest}.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        if (page.getStatusCode() != 200 || !page.isDownloadSuccess()) {
            // Parameterized logging: the original concatenated the status code onto
            // the format string, leaving its {} placeholder unfilled.
            log.error("page [{}] download failed, statusCode = {}", url, page.getStatusCode());
            if (page.getStatusCode() != 404) {
                // Transient failure: schedule a retry; 404s are dropped for good.
                recordAgainRequest(crawlerRecord, parseLinks);
            }
            crawlerRecord.setNeedWashPage(false);
            return parseLinks;
        }
        if (url.matches(listPageUrl)) {
            this.getListArticleUrl(crawlerRecord, page, parseLinks);
        }
        if (url.matches(listArticleUrl)) {
            this.getArticleUrl(crawlerRecord, page, parseLinks);
        }
        // Article pages without a comment section: nothing to follow, wash directly.
        if (url.matches(ArticleUrlType1) || url.matches(ArticleUrlType2) || url.matches(ArticleUrlType3)
                || url.matches(ArticleUrlType4) || url.matches(ArticleUrlType5) || url.matches(ArticleUrlType6)
                || url.matches(ArticleUrlType7) || url.matches(ArticleUrlType8) || url.matches(ArticleUrlType9)
                || url.matches(ArticleUrlType10) || url.matches(ArticleUrlType11) || url.matches(ArticleUrlType13)) {
            crawlerRecord.setNeedWashPage(true);
        }
        // Article pages with comments: follow up with interaction/comment requests.
        if (url.matches(ArticleUrlType12)) {
            this.parseArticle(crawlerRecord, page, parseLinks);
        }
        return parseLinks;
    }

    /**
     * Parses a comment-enabled article page (type 12): reads the embedded
     * {@code var allData = {...};} JSON, extracts the publish time and the comment
     * API parameters, and enqueues one interaction-count request and one
     * comment-list request per match. Articles outside the configured date range
     * are skipped (and the page is not washed).
     */
    private void parseArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String rawText = page.getRawText();
        Matcher mtData = Pattern.compile("var allData = (\\{.*\\});").matcher(rawText);
        while (mtData.find()) {
            // Capture group 1 is the JSON object itself; the original re-derived it by
            // splitting group(0) on "allData =" and chopping the trailing ';', which
            // breaks if the payload itself contains that substring.
            JSONObject jsonObject = JSONObject.parseObject(mtData.group(1));
            JSONObject docData = jsonObject.getJSONObject("docData");
            if (docData == null) {
                return;
            }
            String newsTime = docData.getString("newsTime");
            long releaseTime = 0L;
            // Random surrogate article id: this page type exposes no stable id.
            int articleId = new Random().nextInt(1000000000);
            crawlerRecord.tagsCreator().bizTags().addCustomKV("articleId", articleId);
            try {
                releaseTime = DateUtils.parseDate(newsTime, "yyyy-MM-dd HH:mm:ss").getTime();
            } catch (ParseException e) {
                log.error("newsTime [{}] parse error", newsTime, e);
            }
            if (!isDateRange(crawlerRecord, releaseTime)) {
                crawlerRecord.setNeedWashPage(false);
                return;
            }
            String commentUrl = docData.getString("commentUrl"); // docurl parameter of the interaction-count API
            String skey = docData.getString("skey");             // skey parameter of the comment-list API
            String interactionUrl = String.format(InteractionUrl, commentUrl);
            String commentsUrl = String.format(CommentUrl, commentUrl, skey);
            CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(interactionUrl)
                    .releaseTime(releaseTime)
                    .copyResultTags()
                    .copyBizTags()
                    .build();
            interactionRecord.tagsCreator().bizTags().addCustomKV("articleId", articleId);
            parseLinks.add(interactionRecord);
            // Restore the serialized filter settings onto the comment request so that
            // washComment can apply the same date-range filter later.
            KVTag kvTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(commentFilter);
            CrawlerRecord filterRecord = JSONObject.parseObject((String) kvTag.getVal(), CrawlerRecord.class);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(commentsUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyResultTags()
                    .notFilterRecord()
                    .copyBizTags()
                    .build();
            commentRecord.tagsCreator().bizTags().addCustomKV("articleId", articleId);
            commentRecord.setFilter(filterRecord.getFilter());
            commentRecord.setFilterInfos(filterRecord.getFilterInfos());
            parseLinks.add(commentRecord);
        }
    }

    /**
     * Turns each entry of the list API response into an article-page request,
     * tagging it with the article id for later data assembly.
     */
    private void getArticleUrl(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        List<String> all = page.getJson().jsonPath($_type).all();
        for (String json : all) {
            JSONObject jsonObject = JSONObject.parseObject(json);
            // Unused "thumb" extraction and an empty StringBuilder were removed;
            // getString replaces the unchecked (String) casts.
            String id = jsonObject.getString("id");
            String articleUrl = jsonObject.getString("url");
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(articleUrl)
                    .releaseTime(System.currentTimeMillis())
                    .needParsed(true)
                    .needWashed(true)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            record.tagsCreator().bizTags().addCustomKV("articleId", id);
            parseLinks.add(record);
        }
    }

    /**
     * Converts a series list-page URL into the corresponding list API request.
     */
    private void getListArticleUrl(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String url = crawlerRecord.getHttpRequest().getUrl();
        // Everything after "serialId=" is the series id; substring is safer than the
        // original regex split, which threw ArrayIndexOutOfBounds when the value was absent.
        int idx = url.indexOf("serialId=");
        if (idx < 0) {
            log.error("list page url [{}] has no serialId parameter", url);
            return;
        }
        String seriesId = url.substring(idx + "serialId=".length());
        String listUrl = String.format(updateListArticleUrl, seriesId);
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(listUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .needWashed(false)
                .build();
        parseLinks.add(record);
    }


    /**
     * Dispatches a downloaded page to the matching wash routine based on its URL
     * pattern and the data types requested by the record's result tags.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> results = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.article)) {
            if (matchesPlainArticleUrl(url)) {
                // article pages without a comment section
                this.washNoCommentArticle(crawlerRecord, page, results);
            }
            if (url.matches(ArticleUrlType12)) {
                // article pages that carry a comment section
                this.washCommentArticle(crawlerRecord, page, results);
            }
        }
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.interaction)
                && url.matches(interactionUrlType) && !url.matches(ArticleUrlType12)) {
            this.washInteraction(crawlerRecord, page, results);
        }
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.comment)
                && url.matches(commentUrlType)) {
            this.washComment(crawlerRecord, page, results);
        }
        return results;
    }

    /** True when the URL matches one of the article patterns without a comment section. */
    private static boolean matchesPlainArticleUrl(String url) {
        String[] patterns = {
                ArticleUrlType1, ArticleUrlType2, ArticleUrlType3, ArticleUrlType4,
                ArticleUrlType5, ArticleUrlType6, ArticleUrlType7, ArticleUrlType8,
                ArticleUrlType9, ArticleUrlType10, ArticleUrlType11, ArticleUrlType13
        };
        for (String pattern : patterns) {
            if (url.matches(pattern)) {
                return true;
            }
        }
        return false;
    }

    /**
     * Washes a comment-list API response: emits one comment CrawlerData and one
     * interaction CrawlerData (likes / reply count) per comment inside the
     * configured date range.
     */
    private void washComment(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> listCrawlerData) {
        List<String> all = page.getJson().jsonPath($_type + ".comments").all();
        for (String s : all) {
            JSONObject jsonObject = JSONObject.parseObject(s);
            String comment_id = jsonObject.getString("comment_id");
            String author = jsonObject.getString("uname");
            // create_time is a unix timestamp in seconds; appending "000" yields millis.
            long createTime = Long.parseLong(jsonObject.getString("create_time") + "000");
            if (!isDateRange(crawlerRecord, createTime)) {
                // NOTE(review): the list is ordered by uptimes, not by time, so bailing
                // out may skip later in-range comments — kept as-is to preserve behavior.
                return;
            }
            String articleId = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("articleId");
            String comment_contents = jsonObject.getString("comment_contents"); // reply body
            String uptimes = jsonObject.getString("uptimes");                   // like count
            String integral = jsonObject.getString("integral");                 // reply count
            CrawlerData commentData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment, comment_id))
                    .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, articleId))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(createTime)
                    .addContentKV(AICCommonField.Field_Author, author)
                    .addContentKV(AICCommonField.Field_Content, comment_contents)
                    .flowInPipelineTag("kafka")
                    .build();
            commentData.setFilterPipelineResult(true);
            listCrawlerData.add(commentData);

            CrawlerData interactionData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction, comment_id))
                    .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment, comment_id))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(createTime)
                    .addContentKV(AICCommonField.Field_I_Likes, uptimes)
                    .addContentKV(AICCommonField.Field_I_Comments, integral)
                    .flowInPipelineTag("kafka")
                    .build();
            // Bug fix: the original called setFilterPipelineResult on commentData a
            // second time here, so the interaction record was never marked.
            interactionData.setFilterPipelineResult(true);
            listCrawlerData.add(interactionData);
        }
    }

    /**
     * Washes an interaction-count API response into a single interaction
     * CrawlerData attached to the tagged article id. No-op when the record
     * carries no "articleId" biz tag.
     */
    private void washInteraction(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> listCrawlerData) {
        String articleId = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("articleId");
        if (articleId == null) {
            return; // nothing to attach the counts to
        }
        String likeCount = page.getJson().jsonPath($_type + ".allcount").get();
        String commentCount = page.getJson().jsonPath($_type + ".count").get();
        String domainName = crawlerRecord.getDomain();
        CrawlerData interaction = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", domainName, CrawlerEnum.CrawlerDataType.interaction, articleId))
                .parentId(StringUtils.joinWith("-", domainName, CrawlerEnum.CrawlerDataType.article, articleId))
                .url(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(crawlerRecord.getReleaseTime())
                .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                .addContentKV(AICCommonField.Field_I_Likes, likeCount)
                .addContentKV(AICCommonField.Field_I_Comments, commentCount)
                .flowInPipelineTag("kafka")
                .build();
        interaction.setFilterPipelineResult(true);
        listCrawlerData.add(interaction);
    }

    /**
     * Washes a comment-enabled article page (type 12): extracts the embedded
     * {@code allData} JSON, pulls title/source/body/images, and emits one article
     * CrawlerData per match. Articles outside the date range are skipped.
     */
    private void washCommentArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> listCrawlerData) {
        String articleId = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("articleId");
        String rawText = page.getRawText();
        Matcher mtData = Pattern.compile("var allData = (\\{.*\\});").matcher(rawText);
        while (mtData.find()) {
            // group(1) is the JSON object itself — avoids the fragile split/substring dance.
            JSONObject jsonObject = JSONObject.parseObject(mtData.group(1));
            JSONObject docData = jsonObject.getJSONObject("docData");
            if (docData == null) {
                // malformed payload — guard added to match the sibling parseArticle()
                return;
            }
            String newsTime = docData.getString("newsTime");
            long releaseTime = 0L;
            try {
                releaseTime = DateUtils.parseDate(newsTime, "yyyy-MM-dd HH:mm:ss").getTime();
            } catch (ParseException e) {
                log.error("newsTime [{}] parse error", newsTime, e);
            }
            if (!isDateRange(crawlerRecord, releaseTime)) {
                return;
            }
            crawlerRecord.setReleaseTime(releaseTime);
            String source = docData.getString("source");
            String title = docData.getString("title");
            JSONObject contentData = docData.getJSONObject("contentData");
            JSONArray contentList = contentData.getJSONArray("contentList");
            JSONObject json = JSONObject.parseObject(contentList.getString(0));
            String data = json.getString("data");
            // Re-parse the embedded HTML fragment so the xpath selectors below run on it.
            page.setHtml(Html.create(data));
            StringBuilder text = new StringBuilder();
            for (String s : page.getHtml().xpath("//p//text()").all()) {
                text.append(s);
            }
            StringBuilder images = new StringBuilder();
            for (String s : page.getHtml().xpath("//img/@src").all()) {
                images.append(s).append("\\x01"); // literal \x01 separator between image URLs
            }

            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, articleId))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(releaseTime)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .addContentKV(AICCommonField.Field_Author, source)
                    .addContentKV(AICCommonField.Field_Content, text.toString())
                    .addContentKV(AICCommonField.Field_Images, images.toString())
                    .addContentKV(AICCommonField.Field_Title, title)
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            listCrawlerData.add(crawlerData);
        }
    }


    /**
     * Washes a plain (no comment section) article page whose publish time lives
     * in the {@code #pubtime_baidu} element. Pages without that element fall back
     * to {@link #washArticle}. Restores the date-range filter from the
     * {@code commentFilter} biz tag before applying it, then emits one article
     * CrawlerData.
     */
    private void washNoCommentArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> listCrawlerData) {
        String time = page.getHtml().xpath("//*[@id=\"pubtime_baidu\"]//text()").get();
        if (time == null) {
            // different page template — delegate to the fallback parser
            this.washArticle(crawlerRecord, page, listCrawlerData);
            return;
        }
        long releaseTime = 0L;
        try {
            releaseTime = DateUtils.parseDate(time, "yyyy年MM月dd日 HH:mm:ss").getTime();
        } catch (ParseException e) {
            log.error("pubtime [{}] parse error", time, e);
        }
        // Restore the serialized filter settings so isDateRange sees the real window.
        // (Two discarded crawlerRecord.getFilter() calls were removed; a null guard
        // replaces the NPE the original threw when the tag was absent.)
        KVTag filterInfoTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(commentFilter);
        if (filterInfoTag != null) {
            CrawlerRecord crawlerRecordFilter = JSONObject.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
            crawlerRecord.setFilter(crawlerRecordFilter.getFilter());
            crawlerRecord.setFilterInfos(crawlerRecordFilter.getFilterInfos());
        }
        if (!isDateRange(crawlerRecord, releaseTime)) {
            return;
        }
        crawlerRecord.setReleaseTime(releaseTime);
        String author = page.getHtml().xpath("//*[@id=\"source_baidu\"]/a").get();
        String title = page.getHtml().xpath("/html/body/div[@class=\"w1000\"]/div[1]/div[@class=\"br\"]/div[@class=\"arl-cont\"]/h3/span/text()").get();
        StringBuilder text = new StringBuilder();
        for (String s : page.getHtml().xpath("//div[@class=\"arl-c-txt\"]/p//text()").all()) {
            text.append(s);
        }
        StringBuilder images = new StringBuilder();
        for (String s : page.getHtml().xpath("//div[@class=\"arl-c-txt\"]/p//img/@src").all()) {
            images.append(s).append("\\x01"); // literal \x01 separator between image URLs
        }
        // Random surrogate id — this page type exposes no stable article id.
        int articleId = new Random().nextInt(10000000);
        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, articleId))
                .url(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(releaseTime)
                .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                .addContentKV(AICCommonField.Field_Author, author)
                .addContentKV(AICCommonField.Field_Content, text.toString())
                .addContentKV(AICCommonField.Field_Images, images.toString())
                .addContentKV(AICCommonField.Field_Title, title)
                .flowInPipelineTag("kafka")
                .build();
        crawlerData.setFilterPipelineResult(true);
        listCrawlerData.add(crawlerData);
    }

    /**
     * Fallback wash for older article templates whose publish time lives in
     * {@code span.t_1}. Restores the date-range filter from the
     * {@code commentFilter} biz tag before applying it, then emits one article
     * CrawlerData.
     */
    private void washArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> listCrawlerData) {
        String time = page.getHtml().xpath("//span[@class=\"t_1\"]//text()").get();
        if (time == null) {
            return;
        }
        long releaseTime = 0L;
        try {
            // NOTE(review): this pattern lacks the hour field ("mm:ss" only) unlike
            // the sibling parser's "HH:mm:ss" — confirm against the actual page format.
            releaseTime = DateUtils.parseDate(time, "yyyy年MM月dd日 mm:ss").getTime();
        } catch (ParseException e) {
            log.error("time [{}] parse error", time, e);
        }
        // Restore the serialized filter settings so isDateRange sees the real window.
        // (Two discarded crawlerRecord.getFilter() calls were removed; a null guard
        // replaces the NPE the original threw when the tag was absent.)
        KVTag filterInfoTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(commentFilter);
        if (filterInfoTag != null) {
            CrawlerRecord crawlerRecordFilter = JSONObject.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
            crawlerRecord.setFilter(crawlerRecordFilter.getFilter());
            crawlerRecord.setFilterInfos(crawlerRecordFilter.getFilterInfos());
        }
        if (!isDateRange(crawlerRecord, releaseTime)) {
            return;
        }
        crawlerRecord.setReleaseTime(releaseTime);
        String author = page.getHtml().xpath("/html/body/div[14]/div[1]/div[3]/span[2]/span/text()").get();
        String title = page.getHtml().xpath("/html/body/div[14]/div[1]/div[2]/h1/text()").get();
        StringBuilder text = new StringBuilder();
        for (String s : page.getHtml().xpath("//*[@id=\"artical_real\"]/p").all()) {
            text.append(s);
        }
        StringBuilder images = new StringBuilder();
        for (String s : page.getHtml().xpath("//div[@class=\"arl-c-txt\"]/p//img/@src").all()) {
            images.append(s).append("\\x01"); // literal \x01 separator between image URLs
        }
        String articleId = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("articleId");
        if (articleId == null) {
            // surrogate id when the tag is missing
            articleId = "12" + new Random().nextInt(100000);
        }
        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, articleId))
                .url(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(releaseTime)
                .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                .addContentKV(AICCommonField.Field_Author, author)
                .addContentKV(AICCommonField.Field_Content, text.toString())
                .addContentKV(AICCommonField.Field_Images, images.toString())
                .addContentKV(AICCommonField.Field_Title, title)
                .flowInPipelineTag("kafka")
                .build();
        crawlerData.setFilterPipelineResult(true);
        listCrawlerData.add(crawlerData);
    }


    /**
     * Re-queues a failed request, tracking the attempt number in the
     * {@code RECORD_AGAIN_REQUEST} biz tag. Gives up (logs, enqueues nothing)
     * after 10 attempts.
     */
    private static void recordAgainRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> parseList) {
        String url = crawlerRequestRecord.getHttpRequest().getUrl();
        int count = 1;
        if (crawlerRequestRecord.tagsCreator().bizTags().hasKVTag(RECORD_AGAIN_REQUEST)) {
            count = Integer.parseInt(crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(RECORD_AGAIN_REQUEST));
            if (count >= 10) {
                log.error("url excessive number of repeated downloads this url = {}", url);
                // Stop retrying. The original fell through here AND stored the tag via
                // count++ (post-increment result discarded), so the counter never
                // advanced and the request was re-queued forever.
                return;
            }
        }
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .releaseTime(System.currentTimeMillis())
                .httpUrl(url)
                .recordKey(crawlerRequestRecord.getRecordKey() + count)
                .copyResultTags()
                .copyBizTags()
                .build();

        // Carry over the original request context so the retry is equivalent.
        record.getHttpRequest().setCookies(crawlerRequestRecord.getHttpRequest().getCookies());
        record.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        record.getHttpRequest().setHeaders(crawlerRequestRecord.getHttpRequest().getHeaders());
        record.setNeedParsedPage(crawlerRequestRecord.isNeedParsedPage());
        record.setNeedWashPage(crawlerRequestRecord.isNeedWashPage());
        // Store the NEXT attempt number so the cap above is actually reachable.
        record.tagsCreator().bizTags().addCustomKV(RECORD_AGAIN_REQUEST, count + 1);
        parseList.add(record);
    }
    /**
     * Checks whether a release timestamp falls inside the record's configured
     * date window. Records whose filter is neither dateRange nor keyOrDateRange
     * always pass; a zero timestamp never passes a date-filtered record.
     *
     * @param crawlerRequestRecord record carrying the filter configuration
     * @param releaseTimeToLong    release time in epoch millis
     * @return true when the timestamp is acceptable
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord, Long releaseTimeToLong) {
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        boolean dateFiltered = filter == CrawlerEnum.CrawlerRecordFilter.keyOrDateRange
                || filter == CrawlerEnum.CrawlerRecordFilter.dateRange;
        if (!dateFiltered) {
            return true;
        }
        Long startTime = null;
        Long endTime = null;
        for (FilterInfo filterInfo : crawlerRequestRecord.getFilterInfos()) {
            if (filterInfo.getFilter() != CrawlerEnum.CrawlerRecordFilter.dateRange) {
                continue;
            }
            long[] dateAllowRange = filterInfo.getDateAllowRange();
            if (dateAllowRange != null) {
                // explicit [start, end] window
                startTime = dateAllowRange[0];
                endTime = dateAllowRange[1];
            } else if (filterInfo.getHourFromNow() != 0) {
                // rolling window: now minus one minute, stretching back hourFromNow hours
                endTime = System.currentTimeMillis() - 60000;
                startTime = endTime - DateUtils.MILLIS_PER_HOUR * filterInfo.getHourFromNow();
            }
        }
        return startTime != null && releaseTimeToLong != 0
                && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime;
    }
    /**
     * Post-crawl hook; this script needs no cleanup or follow-up work.
     */
    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // intentionally empty
    }
    /** @return the crawler domain identifier ("ifeng") this script belongs to */
    @Override
    public String domain() {
        return domain;
    }

    /**
     * Accepts only records whose biz-tag site matches this script's site
     * ("SeriesArticle").
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        // Constant-first equals: avoids an NPE when the record carries no site tag.
        return site.equals(crawlerRecord.tagsCreator().bizTags().site());
    }
}
