package com.chance.cc.crawler.development.scripts.sina;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.time.LocalDate;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.internalDownload;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Topic_Type;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/6/25 14:29
 * @Description
 *       Sina (新浪) crawler script: washes articles, interaction counts and comment replies.
 **/
public class SinaCommonCrawlerScript extends CrawlerCommonScript {

    private static Logger log = LoggerFactory.getLogger(SinaCommonCrawlerScript.class);
    // Crawler domain identifier returned by domain().
    private static final String DOMAIN = "sina";
    // Tag marking a download that is re-queued for retry after a failure.
    private static final String REQUEST_AGAIN_TAG = "sina_request_retry";
    private static final String SYC = "syc";// field whose results are synchronized
    private static final String KAFKA = "kafka";// pipeline flow tag: publish result to Kafka
    private static final String MYSQL = "mysql";// pipeline flow tag: persist result to MySQL
    // Sites accepted by crawlerCheck(); records from other sites are rejected.
    private static final List<String> siteList = Arrays.asList("sports","auto","med","cj_searchKw","searchKw");

    // --- Article detail-page URL patterns (regex). "*_SOURCE_URL" constants are
    // String.format templates for building requests; the matching "*_URL"
    // constants are the regexes used to recognize those requests. ---
    private static final String ITEM_STOCK_URL = "http[s]*://stock.finance.sina.com.cn/stock/go.php/vReport_Show/kind/lastest/rptid/\\d+/index.phtml";
    private static final String ITEM_FINANCE_URL = "http[s]*://finance.sina.com.cn/7x24/\\d{4}-\\d{2}-\\d{2}/[a-z]*-[a-z0-9]*.shtml\\S*";

    private static final String ITEM_NUM_URL = "https://\\d+.sina.com.cn/[a-z\\_\\/]*\\d{4}-\\d{2}-\\d{2}/[a-z]+-[a-z0-9]*.shtml";
    private static final String ITEM_CJ_URL = "http[s]*://[t\\.]*cj.sina.[com\\.]*cn/articles/view/\\d+/[a-z0-9]*";
    private static final String ITEM_K_URL = "http[s]*://k.sina.com.cn/article_\\d+_[a-z0-9]*.html\\S*";
    private static final String ITEM_AUTO_URL = "http[s]*://[a-z]+.sina.com.cn/[a-z\\/\\_0-9]*/\\d{4}-\\d{2}-\\d{2}[0-9\\/]*/[a-z]*-[a-z0-9]*.shtml\\S*";
    private static final String ITEM_VR_URL = "http[s]*://vr.sina.com.cn/\\d{4}-\\d{2}-\\d{2}/[a-z]*-[a-z0-9]*.shtml";
    private static final String COMMENT_AUTO_SOURCE_URL = "https://comment.sina.com.cn/page/info?format=json&thread=1&channel=%s&newsid=%s&page_size=10&page=1";
    private static final String COMMENT_AUTO_URL = "https://comment.sina.com.cn/page/info\\S*";

    // Mobile (k.sina.cn / *.sina.cn) article pages and their cmnt.sina.cn comment API.
    private static final String ITEM_K_AUTO_URL = "http[s]*://k.sina.cn/article_\\d+_[a-zA-Z0-9]*.html\\S*";
    private static final String ITEM_AUTO_K_URL = "https://[a-z]*.sina.cn/[a-z\\/]*\\d{4}-\\d{2}-\\d{2}/[a-z]*-[a-z0-9]*.d.html";
    private static final String COMMENT_K_AUTO_SOURCE_URL = "https://cmnt.sina.cn/aj/v2/list?channel=%s&newsid=%s&group=0&thread=1&page=1";
    private static final String COMMENT_K_AUTO_URL = "https://cmnt.sina.cn/aj/v2/list\\S*";

    // Video pages, ZhongCe review pages and their count/like/comment endpoints.
    private static final String ITEM_VIDEO_URL = "http[s]*://video.sina.com.cn/[a-z0-9\\/]*/\\d{4}-\\d{2}-\\d{2}/[a-zA-Z]*-[a-zA-Z0-9]*.d.html";
    private static final String VIDEO_COUNT_SOURCE_URL = "http://interface.sina.cn/auto/news/getAutoVideoCount.d.json?video_ids=%s";
    private static final String VIDEO_COUNT_URL = "http://interface.sina.cn/auto/news/getAutoVideoCount.d.json\\S*";
    private static final String ITEM_ZHONGCE_URL = "https://zhongce.sina.com.cn/article/view/\\d+";
    private static final String ZHONGCE_LIKE_SOURCE_URL = "https://zhongce.sina.com.cn/api/like/get_total_like/?format=json&id=%s&categoryId=2";
    private static final String ZHONGCE_LIKE_URL = "https://zhongce.sina.com.cn/api/like/get_total_like\\S*";
    private static final String COMMENT_VIDEO_SOURCE_URL = "http://comment5.news.sina.com.cn/page/info?format=json&thread=1&channel=%s&newsid=%s&page=1&page_size=20";
    private static final String COMMENT_VIDEO_URL = "http://comment5.news.sina.com.cn/page/info\\S*";

    // Reply-list endpoint for a single comment thread (mid = comment id).
    private static final String COMMENT_REPLY_SOURCE_URL = "http://comment.sina.com.cn/cmnt/info?channel=%s&newsid=%s&mid=%s&format=json&page_size=10&page=1&order=0&thread=1";
    private static final String COMMENT_REPLY_URL = "http://comment.sina.com.cn/cmnt/info\\S*";


    /**
     * Domain this script handles.
     *
     * @return the constant domain identifier "sina"
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers every URL pattern this script can parse: article detail pages,
     * comment endpoints and the comment-reply endpoint.
     */
    @Override
    public void initUrlRegulars() {
        List<String> regulars = Arrays.asList(
                ITEM_STOCK_URL,
                ITEM_FINANCE_URL,
                ITEM_VIDEO_URL,
                ITEM_ZHONGCE_URL,
                COMMENT_VIDEO_URL,
                ITEM_NUM_URL,
                ITEM_AUTO_URL,
                ITEM_K_URL,
                ITEM_CJ_URL,
                ITEM_VR_URL,
                COMMENT_AUTO_URL,
                ITEM_K_AUTO_URL,
                COMMENT_K_AUTO_URL,
                COMMENT_REPLY_URL);
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input gate: only records whose business "site" tag is one of the
     * supported sites in {@code siteList} are processed by this script.
     *
     * <p>Fix: replaced the {@code if (cond) return true; else return false;}
     * anti-idiom with a direct boolean return.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return true when the record's site is supported
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().site();
        return siteList.contains(site);
    }

    /**
     * Link-parsing entry point: on download failure re-queues the request
     * (unless the page is permanently gone), otherwise dispatches the page to
     * the handler matching its URL family (article detail, comment list, or
     * comment-reply list).
     *
     * @param crawlerRequestRecord the record that produced this download
     * @param httpPage             the downloaded page
     * @return follow-up requests discovered on the page (possibly empty)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> followUps = new ArrayList<CrawlerRequestRecord>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        // Download failure: retry unless the page is permanently gone (404).
        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() != 200) {
            log.error("{} status code : {}", crawlerRequestRecord.getHttpRequest().getUrl(), httpPage.getStatusCode());
            if (httpPage.getStatusCode() != 404) {
                requestAgainCrawlerRecord(followUps, crawlerRequestRecord);
            }
            crawlerRequestRecord.setNeedWashPage(false);
            return followUps;
        }

        boolean isItemPage = Arrays.asList(
                ITEM_VIDEO_URL, ITEM_AUTO_URL, ITEM_K_URL, ITEM_K_AUTO_URL,
                ITEM_AUTO_K_URL, ITEM_CJ_URL, ITEM_STOCK_URL, ITEM_FINANCE_URL,
                ITEM_VR_URL, ITEM_ZHONGCE_URL, ITEM_NUM_URL)
                .stream().anyMatch(requestUrl::matches);
        if (isItemPage) {
            itemUrlRecord(crawlerRequestRecord, httpPage, followUps);
        }

        boolean isCommentPage = Arrays.asList(COMMENT_VIDEO_URL, COMMENT_AUTO_URL, COMMENT_K_AUTO_URL)
                .stream().anyMatch(requestUrl::matches);
        if (isCommentPage) {
            commentUrlRecord(crawlerRequestRecord, httpPage, followUps);
        }

        if (requestUrl.matches(COMMENT_REPLY_URL)) {
            commentReplyUrlRecord(crawlerRequestRecord, httpPage, followUps);
        }
        return followUps;
    }

    /**
     * Handles a comment-reply list page: verifies the JSON payload is present
     * (otherwise the download is re-queued and the page is not washed) and
     * schedules the next reply page.
     */
    private void commentReplyUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Probe the payload: a missing "threaddict" node signals a bad/blocked response.
        try {
            httpPage.getJson().jsonPath($_type + ".result.threaddict").get();
        } catch (Exception e) {
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        String currentUrl = httpPage.getRequest().getUrl();
        String nextPageUrl = getNextUrl(currentUrl, null, "page");
        CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();
        // Carry the extras (e.g. "articleUrl") forward to the next page request.
        nextPageRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(nextPageRecord);
    }

    /**
     * Article detail-page handler: derives the release time when needed,
     * checks the page still exists, then schedules the interaction/comment
     * follow-up requests appropriate for the page's URL family.
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        boolean needWashPage = crawlerRequestRecord.isNeedWashPage();
        if (!needWashPage) {
            String releaseTime = "";
            // NOTE(review): only the k.sina.com.cn layout is scraped for a date
            // here; other families fall through with a blank releaseTime and
            // are dropped below — confirm this is intended.
            if (requestUrl.matches(ITEM_K_URL)) {
                releaseTime = httpPage.getHtml().xpath("//span[@class=\"date\"]/text()").get();
            }

            if (StringUtils.isBlank(releaseTime)) {
                log.error("auto estation item url[{}] can not get releaseTime!", requestUrl);
                return;
            }

            try {
                long releaseTimeToLong = washTimeToLong(releaseTime);
                // Skip pages outside the configured date window.
                if (!isDateRange(crawlerRequestRecord, releaseTimeToLong)) {
                    return;
                }

                crawlerRequestRecord.setReleaseTime(releaseTimeToLong);
                crawlerRequestRecord.setNeedWashPage(true);
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }

        // "文章不存在" (= article does not exist) marker rendered on dead pages.
        String result = httpPage.getHtml().xpath("//p[@class=\"u-lost-des\"]").get();
        if (StringUtils.isNotBlank(result) && "文章不存在".equals(result)) {
            log.error("{} 文章不存在", requestUrl);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        // Nothing more to schedule unless interaction or comment data is wanted.
        if (!resultTags.hasDataType(CrawlerEnum.CrawlerDataType.interaction) && !resultTags.hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
            return;
        }

        if (requestUrl.matches(ITEM_VIDEO_URL)) {
            itemVideoUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(ITEM_AUTO_URL) || requestUrl.matches(ITEM_K_URL) || requestUrl.matches(ITEM_VR_URL) || requestUrl.matches(ITEM_CJ_URL) || requestUrl.matches(ITEM_NUM_URL)) {
            itemAutoOrKUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(ITEM_K_AUTO_URL) || requestUrl.matches(ITEM_AUTO_K_URL)) {
            itemKAutoUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(ITEM_ZHONGCE_URL)) {
            itemZhongCeUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(ITEM_STOCK_URL) || requestUrl.matches(ITEM_FINANCE_URL)) {
            // Stock/finance pages expose no comments or interaction counts:
            // drop those data-type tags so nothing tries to wash them.
            if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
                crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().removeLabelTag("interaction");
            }
            if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
                crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().removeLabelTag("comment");
            }
        }
    }

    /**
     * Mobile (k.sina.cn / *.sina.cn) article page: extracts the comment
     * channel and news id embedded in the page and schedules
     * interaction/comment requests against the cmnt.sina.cn endpoint.
     */
    private void itemKAutoUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        String channel = getChannelOrId(httpPage, "product=[a-z]*");
        String id = getChannelOrId(httpPage, "index=[a-z0-9\\-]*");
        // zx.sina.cn pages embed the ids under different parameter names.
        if (requestUrl.contains("zx.sina.cn")) {
            channel = getChannelOrId(httpPage, "channel=[a-z]*");
            id = getChannelOrId(httpPage, "newsid=[a-z0-9\\-]*");
        }

        // Guard clause: without both ids no comment API URL can be built.
        if (StringUtils.isBlank(channel) || StringUtils.isBlank(id)) {
            log.error("channel[{}] or id[{}] is null! dont to wash page[{}]", channel, id, crawlerRequestRecord.getHttpRequest().getUrl());
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        String commentApiUrl = String.format(COMMENT_K_AUTO_SOURCE_URL, channel, id);
        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
            resultTags.getCategoryTag().removeLabelTag("interaction");
            CrawlerRequestRecord interactionRecord = getInteractionRecord(crawlerRequestRecord, commentApiUrl);
            interactionRecord.getHttpRequest().addHeader("referer", requestUrl);
            parsedLinks.add(interactionRecord);
        }
        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
            resultTags.getCategoryTag().removeLabelTag("comment");
            CrawlerRequestRecord commentRecord = getCommentRecord(crawlerRequestRecord, commentApiUrl);
            if (commentRecord != null) {
                commentRecord.getHttpRequest().addHeader("referer", requestUrl);
                parsedLinks.add(commentRecord);
            }
        }
    }

    /**
     * Desktop article page (auto / k / vr / cj / numbered hosts): extracts the
     * comment channel and news id from inline script literals and schedules
     * interaction/comment follow-up requests.
     */
    private void itemAutoOrKUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String channel = getChannelOrId(httpPage, "channel: '[a-z]*'");
        String id = getChannelOrId(httpPage, "newsid: '[a-zA-Z0-9\\-]*'");

        // Guard clause: without both ids no comment API URL can be built.
        if (StringUtils.isBlank(channel) || StringUtils.isBlank(id)) {
            log.error("channel[{}] or id[{}] is null! dont to wash page[{}]", channel, id, crawlerRequestRecord.getHttpRequest().getUrl());
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        String commentApiUrl = String.format(COMMENT_AUTO_SOURCE_URL, channel, id);
        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
            resultTags.getCategoryTag().removeLabelTag("interaction");
            parsedLinks.add(getInteractionRecord(crawlerRequestRecord, commentApiUrl));
        }
        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
            resultTags.getCategoryTag().removeLabelTag("comment");
            CrawlerRequestRecord commentRecord = getCommentRecord(crawlerRequestRecord, commentApiUrl);
            if (commentRecord != null) {
                parsedLinks.add(commentRecord);
            }
        }
    }

    /**
     * Handles a comment-list page: schedules the next comment page while
     * comments remain, and schedules reply-list requests for comments whose
     * inlined reply list is shorter than the total reply count.
     *
     * <p>If the JSON payload is missing (blocked/garbled response) the
     * download is re-queued and the page is not washed.
     *
     * <p>Fixes: removed a dead, empty for-loop over the cmnt.sina.cn thread
     * array; removed commented-out code; guarded against a null "threaddict"
     * array and a blank "count.show" value (both previously threw).
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Probe the payload: a missing "cmntlist" node signals a bad response.
        try {
            httpPage.getJson().jsonPath($_type + ".result.cmntlist").all();
        } catch (Exception e) {
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        boolean comment = crawlerRequestRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.comment);
        if (!comment) {
            return;
        }

        String total = httpPage.getJson().jsonPath($_type + ".result.count.show").get();
        if (StringUtils.isBlank(total) || Integer.parseInt(total) <= 0) {
            return;
        }

        // Schedule the next comment page.
        String nextUrl = getNextUrl(requestUrl, null, "page");
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();
        commentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(commentRecord);

        // Parse the replies attached to the comments.
        if (requestUrl.matches(COMMENT_K_AUTO_URL)) {
            // cmnt.sina.cn returns "threaddict" as an array whose reply format
            // is not understood yet; flag such pages so a parser can be written.
            JSONArray threadArray = JSONObject.parseObject(httpPage.getRawText())
                    .getJSONObject("result").getJSONArray("threaddict");
            if (threadArray != null && threadArray.size() > 0) {
                log.error("[{}] need to write!", crawlerRequestRecord.getHttpRequest().getExtras().get("articleUrl"));
            }
        } else {
            JSONObject threaddict = JSONObject.parseObject(httpPage.getRawText())
                    .getJSONObject("result").getJSONObject("threaddict");
            if (threaddict != null) {
                getCommentReplyRecord(crawlerRequestRecord, httpPage, parsedLinks, threaddict);
            }
        }
    }

    /**
     * Schedules a reply-list request (COMMENT_REPLY_SOURCE_URL) for every
     * comment in {@code threddit} whose total reply count exceeds the number
     * of replies inlined in this response.
     *
     * <p>Fix: replaced raw {@code Map}/{@code Set} types with generics and
     * iterate via {@code entrySet} instead of keySet + get.
     *
     * @param threddit map of comment mid -> JSON string holding "count" and "list"
     */
    private void getCommentReplyRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, JSONObject threddit) {
        String requestUrl = httpPage.getRequest().getUrl();
        Map<String, String> urlParams = getMapFormUrl(requestUrl);
        @SuppressWarnings("unchecked") // fastjson returns a raw Map; keys are the mids
        Map<String, Object> threadMap = threddit.toJavaObject(Map.class);
        for (Map.Entry<String, Object> entry : threadMap.entrySet()) {
            String mid = entry.getKey();
            // Each value is a JSON string containing "count" and the inlined "list".
            JSONObject thread = JSONObject.parseObject((String) entry.getValue());
            int count = thread.getIntValue("count");
            int inlined = thread.getJSONArray("list").size();
            if (inlined < count) {
                // More replies exist than were inlined: fetch the full reply list.
                String replyUrl = String.format(COMMENT_REPLY_SOURCE_URL, urlParams.get("channel"), urlParams.get("newsid"), mid);
                CrawlerRequestRecord replyRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(replyUrl)
                        .releaseTime(System.currentTimeMillis())
                        .httpHeads(httpPage.getRequest().getHeaders())
                        .notFilterRecord()
                        .copyBizTags()
                        .needWashed(true)
                        .copyResultTags()
                        .build();
                replyRecord.getHttpRequest().setExtras(httpPage.getRequest().getExtras());
                parsedLinks.add(replyRecord);
            }
        }
    }

    /**
     * Video article page: schedules an internal request for the play count
     * (when a video id is present on the page) plus the comment5
     * interaction/comment requests.
     */
    private void itemVideoUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String videoId = httpPage.getHtml().xpath("//span[@class=\"playnum\"]/@videoid").get();
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
            if (StringUtils.isBlank(videoId)) {
                // No video id on the page: record zero views instead of fetching.
                crawlerRequestRecord.getHttpRequest().addExtra("views", "0");
            } else {
                String countUrl = String.format(VIDEO_COUNT_SOURCE_URL, videoId);
                parsedLinks.add(getInteractionInternalRecord(crawlerRequestRecord, countUrl));
            }
        }

        itemComment5Record(crawlerRequestRecord, httpPage, parsedLinks);
    }

    /**
     * ZhongCe article page: extracts the numeric article id from the tail of
     * the URL and schedules an internal request for its like count.
     *
     * <p>Fix: {@code lastIndexOf("/") + 1} — the previous substring kept the
     * leading slash, producing "id=/123" in ZHONGCE_LIKE_SOURCE_URL; every
     * other id extraction in this script skips the separator.
     */
    private void itemZhongCeUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        String id = requestUrl.substring(requestUrl.lastIndexOf("/") + 1);

        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
            String url = String.format(ZHONGCE_LIKE_SOURCE_URL, id);
            parsedLinks.add(getInteractionInternalRecord(crawlerRequestRecord, url));
        }
    }

    /**
     * Schedules comment5.news.sina.com.cn interaction/comment requests for a
     * page whose channel/newsid are embedded as script literals.
     *
     * <p>Fix: dropped the redundant trailing {@code return} and switched the
     * missing-id case to a guard clause.
     */
    private void itemComment5Record(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String channel = getChannelOrId(httpPage, "channel:[\"\\ \']*[a-z]*[\"\']*");
        String id = getChannelOrId(httpPage, "newsid:[\"\\ \']*[a-zA-Z0-9\\-\\_]*[\"\']*");

        // Guard clause: without both ids no comment API URL can be built.
        if (StringUtils.isBlank(channel) || StringUtils.isBlank(id)) {
            log.error("channel[{}] or id[{}] is null! dont to wash page[{}]", channel, id, crawlerRequestRecord.getHttpRequest().getUrl());
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        String url = String.format(COMMENT_VIDEO_SOURCE_URL, channel, id);
        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
            resultTags.getCategoryTag().removeLabelTag("interaction");
            parsedLinks.add(getInteractionInternalRecord(crawlerRequestRecord, url));
        }
        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
            resultTags.getCategoryTag().removeLabelTag("comment");
            CrawlerRequestRecord commentRecord = getCommentRecord(crawlerRequestRecord, url);
            if (commentRecord != null) {
                parsedLinks.add(commentRecord);
            }
        }
    }

    /**
     * Builds a comment-list request for {@code commentUrl}. The comment filter
     * previously stashed on the result tags under "comment_filter_record" is
     * copied onto the new record; without it no comment request is produced.
     *
     * <p>Fix: replaced the assignment-inside-{@code if} idiom with a plain
     * assignment followed by a null check.
     *
     * @return the comment request, or null when no comment filter is present
     */
    private CrawlerRequestRecord getCommentRecord(CrawlerRequestRecord crawlerRequestRecord, String commentUrl) {
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        CrawlerRecord commentFilter = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class);
        if (commentFilter == null) {
            log.error("comment filter can not null!");
            return null;
        }

        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .recordKey(commentUrl + "comment")
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .notFilterRecord()
                .needWashed(true)
                .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                .build();
        commentRecord.setFilter(commentFilter.getFilter());
        commentRecord.setFilterInfos(commentFilter.getFilterInfos());
        commentRecord.getHttpRequest().addExtra("articleUrl", crawlerRequestRecord.getHttpRequest().getUrl());
        return commentRecord;
    }

    /**
     * Builds an interaction-count request derived from the given article
     * record; the originating article URL travels along as the "articleUrl"
     * extra so the washed result can be linked back to its article.
     */
    private CrawlerRequestRecord getInteractionRecord(CrawlerRequestRecord crawlerRequestRecord, String interactionUrl) {
        CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(interactionUrl)
                .recordKey(interactionUrl + "interaction")
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                .build();
        interactionRecord.getHttpRequest().addExtra("articleUrl", crawlerRequestRecord.getHttpRequest().getUrl());
        return interactionRecord;
    }

    /**
     * Builds an internal-download request (fetched in-process; its result is
     * consumed by {@code afterInternalDownload}) for an interaction counter
     * endpoint.
     */
    private CrawlerRequestRecord getInteractionInternalRecord(CrawlerRequestRecord crawlerRequestRecord, String interactionUrl) {
        return CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(interactionUrl)
                .recordKey(interactionUrl + "interaction")
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .requestLabelTag(internalDownload)
                .build();
    }

    /**
     * Harvests counters from internally-downloaded helper pages (play count,
     * comment count, like count, release time) and attaches them as extras on
     * the main record. Missing/garbled payloads deliberately fall back to "0"
     * so a single bad counter never blocks washing the article.
     *
     * <p>Fix: the release-time catch block previously executed the no-op
     * {@code setReleaseTime(getReleaseTime())}; it now logs the fallback.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            HttpPage internalDownloadPage = internalDownloadRecord.getInternalDownloadPage();
            String url = internalDownloadRecord.getHttpRequest().getUrl();
            if (url.matches(VIDEO_COUNT_URL)) {
                // VIDEO_COUNT_SOURCE_URL contains a single "=", so [1] is the video id.
                String videoId = url.split("=")[1];
                try {
                    String views = internalDownloadPage.getJson().jsonPath($_type + ".data.video_id." + videoId).get();
                    crawlerRecord.getHttpRequest().addExtra("views", views);
                } catch (Exception e) {
                    crawlerRecord.getHttpRequest().addExtra("views", "0");
                }
            }
            if (url.matches(COMMENT_VIDEO_URL)) {
                try {
                    String comments = internalDownloadPage.getJson().jsonPath($_type + ".result.count.show").get();
                    crawlerRecord.getHttpRequest().addExtra("comments", comments);
                } catch (Exception e) {
                    crawlerRecord.getHttpRequest().addExtra("comments", "0");
                }
                try {
                    String releaseTime = internalDownloadPage.getJson().jsonPath($_type + ".result.news.time").get();
                    crawlerRecord.setReleaseTime(washTimeToLong(releaseTime));
                } catch (Exception e) {
                    // No release time in the payload: keep the record's current value.
                    log.debug("no release time in internal download page [{}]", url);
                }
            }
            if (url.matches(ZHONGCE_LIKE_URL)) {
                try {
                    String likes = internalDownloadPage.getJson().jsonPath($_type + ".result.data.likeCount").get();
                    crawlerRecord.getHttpRequest().addExtra("likes", likes);
                } catch (Exception e) {
                    crawlerRecord.getHttpRequest().addExtra("likes", "0");
                }
            }
        }
    }

    /**
     * Wash entry point: produces article, interaction and/or comment data
     * depending on which data-type tags are present on the record.
     *
     * <p>Fix: {@code washInteraction} returns null when the payload's release
     * time cannot be parsed; the old code added that null straight into the
     * result list. A null check now keeps the output list null-free.
     *
     * @return washed data items (possibly empty, never containing null)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();

        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.article)) {
            crawlerDataList.addAll(washArticle(crawlerRecord, page));
        }

        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
            CrawlerData interactionData = washInteraction(crawlerRecord, page);
            if (interactionData != null) {
                crawlerDataList.add(interactionData);
            }
        }

        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
            crawlerDataList.addAll(washComment(crawlerRecord, page));
        }
        return crawlerDataList;
    }

    /**
     * Dispatches an article page to its URL-family-specific wash routine after
     * tagging the business "type" ("video" for video pages, otherwise
     * "article").
     *
     * @return washed article data; empty when the URL matches no known family
     */
    private List<CrawlerData> washArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        List<CrawlerData> washed = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();

        if (itemUrl.matches(ITEM_VIDEO_URL)) {
            requestRecord.tagsCreator().bizTags().addCustomKV("type", "video");
            washed.addAll(washVideoArticleOrInteraction(requestRecord, httpPage));
            return washed;
        }
        if (itemUrl.matches(ITEM_ZHONGCE_URL)) {
            requestRecord.tagsCreator().bizTags().addCustomKV("type", "article");
            washed.addAll(washZhongCeArticleOrInteraction(requestRecord, httpPage));
            return washed;
        }
        if (itemUrl.matches(ITEM_AUTO_URL) || itemUrl.matches(ITEM_K_URL) || itemUrl.matches(ITEM_CJ_URL) || itemUrl.matches(ITEM_VR_URL) || itemUrl.matches(ITEM_NUM_URL)) {
            requestRecord.tagsCreator().bizTags().addCustomKV("type", "article");
            washed.addAll(washAutoOrKArticle(requestRecord, httpPage));
            return washed;
        }
        if (itemUrl.matches(ITEM_K_AUTO_URL) || itemUrl.matches(ITEM_AUTO_K_URL)) {
            requestRecord.tagsCreator().bizTags().addCustomKV("type", "article");
            washed.addAll(washKAutoArticle(requestRecord, httpPage));
            return washed;
        }
        if (itemUrl.matches(ITEM_STOCK_URL)) {
            requestRecord.tagsCreator().bizTags().addCustomKV("type", "article");
            washed.addAll(washStockArticle(requestRecord, httpPage));
            return washed;
        }
        if (itemUrl.matches(ITEM_FINANCE_URL)) {
            requestRecord.tagsCreator().bizTags().addCustomKV("type", "article");
            washed.addAll(washFinanceArticle(requestRecord, httpPage));
        }
        return washed;
    }

    /**
     * Washes an interaction (comment-count) payload into a CrawlerData record.
     *
     * <p>The article key is re-derived from the originating article URL (each
     * URL family encodes it differently) so that this record's parentId lines
     * up with the article's dataId.
     *
     * @return interaction data flagged for the Kafka pipeline, or null when
     *         the payload's release time cannot be parsed
     */
    private CrawlerData washInteraction(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String site = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String itemUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        String articleKey = "";
        // Per-family key extraction; an unmatched URL leaves the key blank.
        if (articleUrl.matches(ITEM_AUTO_URL) || articleUrl.matches(ITEM_VR_URL) || articleUrl.matches(ITEM_NUM_URL)) {
            articleKey = articleUrl.substring(articleUrl.lastIndexOf("-") + 1, articleUrl.lastIndexOf("."));
        } else if (articleUrl.matches(ITEM_K_URL) || articleUrl.matches(ITEM_K_AUTO_URL)) {
            articleKey = articleUrl.substring(articleUrl.lastIndexOf("_") + 1, articleUrl.lastIndexOf("."));
        } else if (articleUrl.matches(ITEM_CJ_URL)) {
            articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1);
        } else if (articleUrl.matches(ITEM_AUTO_K_URL)) {
            articleKey = articleUrl.substring(articleUrl.lastIndexOf("-") + 1, articleUrl.lastIndexOf(".d"));
        }


        String comments = httpPage.getJson().jsonPath($_type + ".result.count.show").get();
        comments = StringUtils.isBlank(comments) ? "0" : comments;
        String releaseTime = httpPage.getJson().jsonPath($_type + ".result.news.time").get();
        try {
            long releaseTimeToLong = washTimeToLong(releaseTime);
            CrawlerData interactionData = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.interaction.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_I_Comments, comments)
                    .flowInPipelineTag(KAFKA)
                    .build();
            interactionData.setFilterPipelineResult(true);
            return interactionData;
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return null;
    }


    /**
     * Washes the comment API response into CrawlerData records.
     * Parses the top-level comment list ({@code result.cmntlist}) and, for most
     * comment endpoints, the nested reply threads ({@code result.threaddict}).
     * The article key is re-derived from the originating article URL (stashed in
     * the request extras) using the same URL-shape rules as the article washers.
     *
     * @param requestRecord the request that produced this page
     * @param httpPage      the downloaded comment JSON page
     * @return comment + interaction records (possibly empty, never null)
     */
    private List<CrawlerData> washComment(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) requestRecord.getHttpRequest().getExtras().get("articleUrl");
        String articleKey = "";
        if (articleUrl.matches(ITEM_VIDEO_URL) || articleUrl.matches(ITEM_AUTO_K_URL)) {
            articleKey = articleUrl.substring(articleUrl.lastIndexOf("-") + 1, articleUrl.lastIndexOf(".d"));
        } else if (articleUrl.matches(ITEM_AUTO_URL) || articleUrl.matches(ITEM_VR_URL) || articleUrl.matches(ITEM_NUM_URL)) {
            articleKey = articleUrl.substring(articleUrl.lastIndexOf("-") + 1, articleUrl.lastIndexOf("."));
        } else if (articleUrl.matches(ITEM_K_URL) || articleUrl.matches(ITEM_K_AUTO_URL)) {
            articleKey = articleUrl.substring(articleUrl.lastIndexOf("_") + 1, articleUrl.lastIndexOf("."));
        } else if (articleUrl.matches(ITEM_CJ_URL) || articleUrl.matches(ITEM_ZHONGCE_URL)) {
            articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1);
        }

        try {
            // Top-level comments.
            List<String> commentList = httpPage.getJson().jsonPath($_type + ".result.cmntlist").all();
            for (String comment : commentList) {
                washCommentList(requestRecord, httpPage, JSONObject.parseObject(comment), itemUrl, articleKey, crawlerDataList);
            }

            // Nested replies. COMMENT_K_AUTO_URL responses have not been observed to
            // carry a "threaddict" so far, so those pages are skipped.
            if (!itemUrl.matches(COMMENT_K_AUTO_URL)) {
                JSONObject threddict = JSONObject.parseObject(httpPage.getRawText()).getJSONObject("result").getJSONObject("threaddict");
                if (threddict != null) {
                    Map map = threddict.toJavaObject(Map.class);
                    for (Object key : map.keySet()) {
                        // Each thread value is itself a JSON string holding a "list" of replies.
                        String data = (String) map.get(key);
                        JSONArray list = JSONObject.parseObject(data).getJSONArray("list");
                        for (Object reply : list) {
                            washCommentList(requestRecord, httpPage, JSONObject.parseObject((String) reply), itemUrl, articleKey, crawlerDataList);
                        }
                    }
                }
            }
        } catch (ParseException e) {
            // Keep the stack trace; message-only logging hides where parsing failed.
            log.error(e.getMessage(), e);
        }
        return crawlerDataList;
    }

    /**
     * Converts one comment JSON node into two records appended to {@code crawlerDataList}:
     * the comment itself (author, content, area, layer, parent nick) and its
     * interaction record (like count, "agree" field).
     *
     * @param requestRecord   originating request (supplies domain and the "site" biz tag)
     * @param httpPage        downloaded page the JSON came from
     * @param jsonObject      one comment node from cmntlist / threaddict
     * @param itemUrl         URL of the comment API page
     * @param articleKey      key of the parent article (used to build parentId)
     * @param crawlerDataList output accumulator; two entries are added per call
     * @throws ParseException when the comment "time" field matches none of the known formats
     */
    private void washCommentList(CrawlerRequestRecord requestRecord, HttpPage httpPage, JSONObject jsonObject, String itemUrl, String articleKey, List<CrawlerData> crawlerDataList) throws ParseException {
        String site = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String commentId = jsonObject.getString("mid");
        String area = jsonObject.getString("area");
        String author = jsonObject.getString("nick");
        String authorId = jsonObject.getString("uid");
        String content = jsonObject.getString("content");
        String parentNick = jsonObject.getString("parent_nick");
        String layer = jsonObject.getString("layer");
        // Deeply nested replies (layer > 2) get a "回复<parent>:" prefix so the reply target stays visible.
        // NOTE(review): Integer.parseInt throws if "layer" is absent or non-numeric while
        // parent_nick is set — assumes the API always sends a numeric layer; confirm.
        if (StringUtils.isNotBlank(parentNick) && Integer.parseInt(layer) > 2) {
            content = "回复" + parentNick + ":" + content;
        }
        String releaseTime = jsonObject.getString("time");

        long releaseTimeToLong = washTimeToLong(releaseTime);
        // Comment record: parented under the article, keyed by the comment "mid".
        CrawlerData commentData = CrawlerData.builder()
                .data(requestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.comment.enumVal(), commentId))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("comment"))
                .url(itemUrl)
                .releaseTime(releaseTimeToLong)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Author_Id, authorId)
                .addContentKV(Field_Content, content)
                .addContentKV("area", area)
                .addContentKV("layer", layer)
                .addContentKV("parentNick", parentNick)
                .flowInPipelineTag(KAFKA)
                .build();
        commentData.setFilterPipelineResult(true);
        crawlerDataList.add(commentData);

        // Interaction record for the same comment: only the like count ("agree").
        String likes = jsonObject.getString("agree");
        CrawlerData commentInteractionData = CrawlerData.builder()
                .data(requestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.comment.enumVal(), commentId))
                .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.comment.enumVal(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), commentId))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                .url(itemUrl)
                .releaseTime(releaseTimeToLong)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                .addContentKV(Field_I_Likes, likes)
                .flowInPipelineTag(KAFKA)
                .build();
        commentInteractionData.setFilterPipelineResult(true);
        crawlerDataList.add(commentInteractionData);
    }

    /**
     * Washes a Sina finance article page into a single article record.
     * The article key is the segment between the last '-' and the last '.' of the
     * item URL; the release time comes from the "news-date" div, falling back to
     * the request record's release time when that node is blank.
     *
     * @return list with at most one article record (empty when washing fails)
     */
    private List<CrawlerData> washFinanceArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String site = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("-") + 1, itemUrl.lastIndexOf("."));

        String releaseTime = httpPage.getHtml().xpath("//div[@class=\"news-date\"]/text()").get();
        List<String> all = httpPage.getHtml().xpath("//div[@id=\"artibody\"]//text()").all();
        // StringBuilder instead of StringBuffer: single-threaded, no synchronization needed.
        StringBuilder conentBf = new StringBuilder();
        for (String s : all) {
            if (StringUtils.isBlank(s)) {
                continue;
            }
            conentBf.append(s).append(" ");
        }

        try {
            long releaseTimeToLong = StringUtils.isBlank(releaseTime) ? requestRecord.getReleaseTime() : washTimeToLong(releaseTime);
            CrawlerData article = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .releaseTime(releaseTimeToLong)
                    .url(itemUrl)
                    .addContentKV(Field_Content, getContentNoEs(conentBf.toString()))
                    .flowInPipelineTag(KAFKA)
                    .build();
            article.setFilterPipelineResult(true);
            crawlerDataList.add(article);
        } catch (Exception e) {
            // Preserve the stack trace; the message alone rarely identifies the bad page.
            log.error(e.getMessage(), e);
        }
        return crawlerDataList;
    }

    /**
     * Washes a Sina stock research page into a single article record.
     * Title and author come from the page DOM; the release date is pulled out of
     * the raw text with a "日期：yyyy-MM-dd" regex; the article key is the first
     * digit run found in the item URL.
     */
    private List<CrawlerData> washStockArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String site = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        List<CrawlerData> results = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = washContent("\\d+", itemUrl);

        String title = httpPage.getHtml().xpath("//h1").get();
        String author = httpPage.getHtml().xpath("//span[text()='机构：']/a/text()").get();
        String releaseTime = washContent("日期：\\d{4}-\\d{2}-\\d{2}", httpPage.getRawText());

        // Glue every non-blank text node of the body together, space-separated.
        StringBuffer bodyText = new StringBuffer();
        for (String fragment : httpPage.getHtml().xpath("//div[@class=\"blk_container\"]//text()").all()) {
            if (StringUtils.isNotBlank(fragment)) {
                bodyText.append(fragment).append(" ");
            }
        }

        try {
            long releaseTimeToLong = StringUtils.isNotBlank(releaseTime)
                    ? washTimeToLong(releaseTime)
                    : requestRecord.getReleaseTime();
            CrawlerData article = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .releaseTime(releaseTimeToLong)
                    .url(itemUrl)
                    .addContentKV(Field_Title, getContentNoEs(title))
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Content, getContentNoEs(bodyText.toString()))
                    .flowInPipelineTag(KAFKA)
                    .build();
            article.setFilterPipelineResult(true);
            results.add(article);
        } catch (Exception e) {
            log.error(e.getMessage());
        }
        return results;
    }

    /**
     * Washes a Sina ZhongCe (众测) page into an article record plus — when the
     * request extras carry counters — an interaction record (likes / comments).
     * The article key is the last path segment of the item URL; authorId is the
     * last path segment of the author link href.
     *
     * @return 0–2 records; empty when release-time parsing fails before the article is built
     */
    private List<CrawlerData> washZhongCeArticleOrInteraction(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String site = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        String title = httpPage.getHtml().xpath("//p[@class=\"tit \"]/text()").get();
        String author = httpPage.getHtml().xpath("//a[@class=\"author clearfix\"]/span/text()").get();
        String authorId = httpPage.getHtml().xpath("//a[@class=\"author clearfix\"]/@href").get();
        // Reduce the author href to its final path segment, e.g. ".../u/12345" -> "12345".
        if (StringUtils.isNotBlank(authorId)) {
            String[] split = authorId.split("/");
            authorId = split[split.length - 1];
        }
        String releaseTime = httpPage.getHtml().xpath("//p[@class=\"tags clearfix\"]/span/text()").get();
        List<String> all = httpPage.getHtml().xpath("//div[@class=\"des\"]//text()").all();
        // Concatenate non-blank body text nodes, space-separated.
        StringBuffer conentBf = new StringBuffer();
        for (String s : all) {
            if (StringUtils.isBlank(s)) {
                continue;
            }
            conentBf.append(s).append(" ");
        }
        List<String> imgs = httpPage.getHtml().xpath("//div[@class=\"des\"]//img/@src").all();

        try {
            // Fall back to the request record's time when the page carries no date.
            long releaseTimeToLong = StringUtils.isBlank(releaseTime) ? requestRecord.getReleaseTime() : washTimeToLong(releaseTime);
            CrawlerData article = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .releaseTime(releaseTimeToLong)
                    .url(itemUrl)
                    .addContentKV(Field_Title, getContentNoEs(title))
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Content, getContentNoEs(conentBf.toString()))
                    .addContentKV(Field_Images, imgs.toString())
                    .flowInPipelineTag(KAFKA)
                    .build();
            article.setFilterPipelineResult(true);
            crawlerDataList.add(article);


            // Interaction counters were captured by the list-page parser and passed
            // along in the request extras. NOTE(review): assumes "comments"/"likes"
            // keys are set by the upstream list wash — confirm against that caller.
            Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
            if (extras != null) {
                String comments = (String) extras.get("comments");
                String likes = (String) extras.get("likes");

                CrawlerData interaction = CrawlerData.builder()
                        .data(requestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.interaction.enumVal(), articleKey))
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                        .releaseTime(releaseTimeToLong)
                        .url(itemUrl)
                        .addContentKV(Field_I_Likes, likes)
                        .addContentKV(Field_I_Comments, comments)
                        .flowInPipelineTag(KAFKA)
                        .build();
                interaction.setFilterPipelineResult(true);
                crawlerDataList.add(interaction);
            }
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return crawlerDataList;
    }

    /**
     * Washes a Sina K-auto style article page (mobile/weibo-card layouts) into a
     * single article record. The article key derivation depends on which URL shape
     * matched (ITEM_AUTO_K_URL vs ITEM_K_AUTO_URL).
     */
    private List<CrawlerData> washKAutoArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String site = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = "";
        if (itemUrl.matches(ITEM_AUTO_K_URL)) {
            articleKey = itemUrl.substring(itemUrl.lastIndexOf("-") + 1, itemUrl.lastIndexOf(".d"));
        } else if (itemUrl.matches(ITEM_K_AUTO_URL)) {
            articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));
        }

        String title = httpPage.getHtml().xpath("//h1").get();
        String author = httpPage.getHtml().xpath("//h2[@class=\"weibo_user\"]").get();
        String authorId = httpPage.getHtml().xpath("//a[@class=\"weibo_info look_info\"]/@href").get();
        // Strip the last character of the href — presumably a trailing "/" — before
        // taking the final path segment. NOTE(review): if the href has no trailing
        // slash this drops a real character; confirm against live pages.
        if (StringUtils.isNotBlank(authorId)) {
            authorId = authorId.substring(0, authorId.length() - 1);
        }
        authorId = StringUtils.isNotBlank(authorId) ? authorId.substring(authorId.lastIndexOf("/") + 1) : "";
        // Fallback: some layouts expose the uid as a data attribute instead of a link.
        authorId = StringUtils.isBlank(authorId) ? httpPage.getHtml().xpath("//figure[@class=\"weibo_info look_info\"]/@data-uid|//div[@class=\"media_focus\"]/@data-muid").get() : authorId;
        String releaseTime = httpPage.getHtml().xpath("//span[@class=\"weibo_time_day\"]").get();
        List<String> all = httpPage.getHtml().xpath("//div[@class=\"xj_module\"]/p//text()|//article[@class=\"s_card article_box\"]//p//text()|//section[@class=\"art_pic_card art_content\"]//text()").all();
        // Concatenate non-blank body text nodes, space-separated.
        StringBuffer conentBf = new StringBuffer();
        for (String s : all) {
            if (StringUtils.isBlank(s)) {
                continue;
            }
            conentBf.append(s).append(" ");
        }
        List<String> imgs = httpPage.getHtml().xpath("//div[@class=\"art_t img\"]//img/@data-src|//article[@class=\"s_card article_box\"]//img/@src|//section[@class=\"art_pic_card art_content\"]//img/@src").all();

        try {
            // Fall back to the request record's time when the page carries no date.
            long releaseTimeToLong = StringUtils.isBlank(releaseTime) ? requestRecord.getReleaseTime() : washTimeToLong(releaseTime);
            CrawlerData article = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .releaseTime(releaseTimeToLong)
                    .url(itemUrl)
                    .addContentKV(Field_Title, getContentNoEs(title))
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Content, getContentNoEs(conentBf.toString()))
                    .addContentKV(Field_Images, imgs.toString())
                    .flowInPipelineTag(KAFKA)
                    .build();
            article.setFilterPipelineResult(true);
            crawlerDataList.add(article);


        } catch (ParseException e) {
            log.error(e.getMessage());
        }

        return crawlerDataList;
    }

    /**
     * Washes a Sina auto / news article page into an article record; when the
     * business tag "syc" is "true" it additionally emits a MySQL sync record that
     * pushes the final item URL and release time back to storage.
     * The article key derivation depends on which URL shape matched.
     *
     * @return 1–2 records; empty when release-time parsing fails before the article is built
     */
    private List<CrawlerData> washAutoOrKArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String site = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = "";
        if (itemUrl.matches(ITEM_AUTO_URL) || itemUrl.matches(ITEM_VR_URL) || itemUrl.matches(ITEM_NUM_URL)) {
            articleKey = itemUrl.substring(itemUrl.lastIndexOf("-") + 1, itemUrl.lastIndexOf("."));
        } else if (itemUrl.matches(ITEM_K_URL)) {
            articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));
        } else if (itemUrl.matches(ITEM_CJ_URL)) {
            articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);
        }

        String title = httpPage.getHtml().xpath("//h1[@class=\"main-title\"]").get();
        String author = httpPage.getHtml().xpath("//a[@class=\"source ent-source\"]|//div[@class=\"txtdetail\"]/a/text()|//div[@class=\"date-source\"]/a|//span[@class=\"source ent-source\"]").get();
        String authorId = httpPage.getHtml().xpath("//a[@class=\"source ent-source\"]/@href").get();
        authorId = StringUtils.isNotBlank(authorId) ? authorId.substring(authorId.lastIndexOf("/") + 1) : "";
        String releaseTime = httpPage.getHtml().xpath("//span[@class=\"date\"]").get();
        // Fall back to a full-timestamp regex over the raw page when the date node is absent.
        releaseTime = StringUtils.isBlank(releaseTime) ? washContent("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", httpPage.getRawText()) : releaseTime;
        List<String> all = httpPage.getHtml().xpath("//div[@id=\"artibody\"]//p//text()|//div[@id=\"article\"]//p//text()").all();
        // StringBuilder instead of StringBuffer: single-threaded, no synchronization needed.
        StringBuilder conentBf = new StringBuilder();
        for (String s : all) {
            // Skip blank nodes and inline player config fragments ("video_id").
            if (StringUtils.isBlank(s) || s.contains("video_id")) {
                continue;
            }
            conentBf.append(s).append(" ");
        }
        List<String> imgs = httpPage.getHtml().xpath("//div[@id=\"artibody\"]//img/@src|//div[@id=\"article\"]//img/@src").all();
        List<String> keywords = httpPage.getHtml().xpath("//div[@id=\"keywords\"]/a").all();

        try {
            long releaseTimeToLong = StringUtils.isBlank(releaseTime) ? requestRecord.getReleaseTime() : washTimeToLong(releaseTime);
            CrawlerData article = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .releaseTime(releaseTimeToLong)
                    .url(itemUrl)
                    .addContentKV(Field_Title, getContentNoEs(title))
                    .addContentKV(Field_Source, author)
//                    .addContentKV(Field_Author_Id,authorId)
                    .addContentKV(Field_Content, getContentNoEs(conentBf.toString()))
                    .addContentKV(Field_Images, imgs.toString())
                    .flowInPipelineTag(KAFKA)
                    .build();
            if (keywords != null && keywords.size() > 0) {
                article.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, keywords);
            }
            article.setFilterPipelineResult(true);
            crawlerDataList.add(article);


            // Sync the resolved article URL and release time back to MySQL when the
            // "syc" business tag is enabled. "true".equals(syc) is null-safe on its
            // own, so the old assignment-inside-isNotBlank condition was redundant.
            String syc = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(SYC);
            if ("true".equals(syc)) {
                String siteInfo = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site_info");
                CrawlerData sycArticle = CrawlerData.builder()
                        .data(requestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, siteInfo, articleKey))
                        .url(itemUrl)
                        .addContentKV("itemUrl", itemUrl)
                        .addContentKV("releaseTimeToLong", String.valueOf(releaseTimeToLong))
                        .flowInPipelineTag(MYSQL)
                        .build();
                sycArticle.setFilterPipelineResult(true);
                crawlerDataList.add(sycArticle);
            }
        } catch (ParseException e) {
            // Preserve the stack trace; the message alone rarely identifies the bad page.
            log.error(e.getMessage(), e);
        }

        return crawlerDataList;
    }

    /**
     * Washes a Sina video item page: always emits the article record and, when the
     * request extras carry view/comment counters, an interaction record as well.
     */
    private List<CrawlerData> washVideoArticleOrInteraction(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String site = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        List<CrawlerData> results = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("-") + 1, itemUrl.lastIndexOf(".d"));

        String title = httpPage.getHtml().xpath("//span[@class=\"text\"]|//h2[@title]").get();
        String content = httpPage.getHtml().xpath("//em[@task=\"oldinfor\"]/p//text()").get();

        // The article id doubles as the interaction record's parent id below.
        String articleId = StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey);
        CrawlerData article = CrawlerData.builder()
                .data(requestRecord, httpPage)
                .dataId(articleId)
                .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                .releaseTime(requestRecord.getReleaseTime())
                .url(itemUrl)
                .addContentKV(Field_Title, getContentNoEs(title))
                .addContentKV(Field_Content, getContentNoEs(content))
                .flowInPipelineTag(KAFKA)
                .build();
        article.setFilterPipelineResult(true);
        results.add(article);

        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        if (extras == null) {
            return results;
        }

        // Counters gathered upstream ride along in the request extras.
        String views = (String) extras.get("views");
        String comments = (String) extras.get("comments");
        CrawlerData interaction = CrawlerData.builder()
                .data(requestRecord, httpPage)
                .parentId(articleId)
                .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.interaction.enumVal(), articleKey))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                .releaseTime(requestRecord.getReleaseTime())
                .url(itemUrl)
                .addContentKV(Field_I_Views, views)
                .addContentKV(Field_I_Comments, comments)
                .flowInPipelineTag(KAFKA)
                .build();
        interaction.setFilterPipelineResult(true);
        results.add(interaction);

        return results;
    }

    /**
     * Post-execution hook required by {@code CrawlerCommonScript}; this script
     * needs no post-processing, so the body is intentionally empty.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-queues a failed request for retry, giving up after 5 attempts.
     * The retry count is carried on the record's business tags under
     * {@code REQUEST_AGAIN_TAG}; the count is also appended to the record key so
     * each retry gets a distinct key. Turn-page requests and item-page requests
     * are rebuilt through their respective builder entry points.
     *
     * @param crawlerRequestRecords output list the rebuilt retry record is added to
     * @param crawlerRecord         the record whose download/parse failed
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            // Hard cap: after 5 retries, log and drop the record instead of re-queuing.
            if (count >= 5) {
                log.error("sina download page the number of retries exceeds the limit" +
                        ",request url {},detail is [{}]", crawlerRecord.getHttpRequest().getUrl(), JSONObject.toJSONString(crawlerRecord));
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        CrawlerRequestRecord crawlerRequestRecord = null;
        // Absence of the "turn_page_item_request" label tag means this is a turn-page request.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // Carry over per-request state so the retry behaves exactly like the original.
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Returns the first substring of {@code input} that matches {@code regx},
     * or {@code null} when there is no match.
     * (Package-private rather than private so it can be unit tested.)
     *
     * @param regx  regular expression to search with
     * @param input text to search in
     */
    static String washContent(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        // The original used a while-loop that returned on the first iteration;
        // a plain first-match check expresses the intent directly.
        return matcher.find() ? matcher.group(0) : null;
    }

    /**
     * Returns {@code content} with HTML entities repeatedly unescaped until no
     * entity markers ({@code &} and {@code ;}) remain, capped at 6 passes to
     * guard against pathological input. Blank input yields "".
     */
    private static String getContentNoEs(String content) {
        if (StringUtils.isBlank(content)) {
            return "";
        }

        String result = content;
        // BUG FIX: the original loop tested 'content', which is never reassigned,
        // so it could not detect completion and always ran the maximum number of
        // passes. Test the evolving 'result' instead, keeping the 6-pass cap.
        int passes = 0;
        while (result.contains("&") && result.contains(";") && passes < 6) {
            result = StringEscapeUtils.unescapeHtml(result);
            passes++;
        }
        return result;
    }

    /**
     * Parses the many timestamp formats Sina pages emit into epoch milliseconds.
     * Blank input yields 0. Strings of the form "MM月dd日" (optionally with HH:mm)
     * carry no year, so the current year is prefixed first — which is exactly what
     * the "yyyyMM月dd日" patterns below then match.
     *
     * @param time raw timestamp text scraped from the page; may be blank
     * @return epoch millis, or 0 for blank input
     * @throws ParseException when none of the known patterns match
     */
    private static long washTimeToLong(String time) throws ParseException {
        if (StringUtils.isBlank(time)) {
            return 0;
        }

        time = time.trim();
        // Year-less forms: prepend the current year so the yyyy-prefixed patterns apply.
        if (time.matches("\\d{2}月\\d{2}日") || time.matches("\\d{2}月\\d{2}日 \\d{2}:\\d{2}")) {
            time = LocalDate.now().getYear() + time;
        }
        return DateUtils.parseDate(time, "yyyy-MM-dd HH:mm:ss", "发表于 yyyy/MM/dd HH:mm", "yyyyMM月dd日", "yyyy-MM-dd HH:mm", "yyyyMM月dd日 HH:mm", "日期：yyyy-MM-dd", "yyyy年MM月dd日 HH:mm").getTime();
    }

    /**
     * Extracts the id or channel needed for the comment URL from the raw page text.
     * The regex hit may be wrapped in single quotes, double quotes, or appear as an
     * assignment ("name = value"); each form is reduced to the bare value.
     *
     * @return the extracted value, or null when the regex finds nothing
     */
    private String getChannelOrId(HttpPage httpPage, String regx) {
        String matched = washContent(regx, httpPage.getRawText());
        if (StringUtils.isBlank(matched)) {
            return null;
        }
        if (matched.contains("'")) {
            return matched.substring(matched.indexOf("'") + 1, matched.lastIndexOf("'"));
        }
        if (matched.contains("\"")) {
            return matched.substring(matched.indexOf("\"") + 1, matched.lastIndexOf("\""));
        }
        if (matched.contains("=")) {
            return matched.split("=")[1].trim();
        }
        return matched;
    }

    /**
     * Builds the next-page URL from {@code requestUrl}: the parameter named
     * {@code page} is incremented by one, the parameter named {@code keyword} is
     * URL-encoded, and all other query parameters are passed through unchanged.
     * If encoding the keyword fails, that parameter is dropped (as before).
     *
     * @param requestUrl current URL with a query string ("?" required)
     * @param page       name of the page-number parameter, may be blank
     * @param keyword    name of the keyword parameter to re-encode, may be blank
     */
    private String getNextUrl(String requestUrl, String keyword, String page) {
        String[] split = requestUrl.split("\\?");
        // StringBuilder instead of repeated String concatenation in the loop.
        StringBuilder nextUrl = new StringBuilder(split[0]).append('?');
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair nameValuePair : parse) {
            String name = nameValuePair.getName();
            String value = nameValuePair.getValue();
            if (StringUtils.isNotBlank(page) && page.equals(name)) {
                nextUrl.append(name).append('=').append(Integer.parseInt(value) + 1).append('&');
            } else if (StringUtils.isNotBlank(keyword) && keyword.equals(name)) {
                try {
                    // Encode first so a failure leaves the builder untouched (param skipped).
                    String encoded = URLEncoder.encode(value, "UTF-8");
                    nextUrl.append(name).append('=').append(encoded).append('&');
                } catch (UnsupportedEncodingException e) {
                    log.error(e.getMessage(), e);
                }
            } else {
                nextUrl.append(name).append('=').append(value).append('&');
            }
        }
        // Trim the trailing '&' (or the '?' when there were no parameters).
        return nextUrl.substring(0, nextUrl.length() - 1);
    }

    /**
     * Splits the query string of {@code requestUrl} into a name → value map.
     * Assumes the URL contains a "?"; duplicate names keep the last value.
     */
    private Map<String, String> getMapFormUrl(String requestUrl) {
        String query = requestUrl.split("\\?")[1];
        Map<String, String> params = new HashMap<>();
        for (NameValuePair pair : URLEncodedUtils.parse(query, Charset.defaultCharset())) {
            params.put(pair.getName(), pair.getValue());
        }
        return params;
    }

    /**
     * Checks whether {@code releaseTimeToLong} falls inside the record's configured
     * date-range filter. Records whose filter is neither {@code dateRange} nor
     * {@code keyOrDateRange} are always considered in range. The range comes from
     * either an explicit [start, end] pair or an hours-from-now window; a release
     * time of 0 (unparsed) is always out of range.
     *
     * @param crawlerRequestRecord record carrying the filter configuration
     * @param releaseTimeToLong    candidate timestamp in epoch millis (0 = unknown)
     * @return true when the timestamp passes the filter
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord, Long releaseTimeToLong) {
        boolean isRange = false;
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        if (filter == CrawlerEnum.CrawlerRecordFilter.keyOrDateRange || filter == CrawlerEnum.CrawlerRecordFilter.dateRange) {
            List<FilterInfo> filterInfos = crawlerRequestRecord.getFilterInfos();
            Long startTime = null;
            Long endTime = null;
            for (FilterInfo filterInfo : filterInfos) {
                if (filterInfo.getFilter() == CrawlerEnum.CrawlerRecordFilter.dateRange) {
                    long[] dateAllowRange = filterInfo.getDateAllowRange();
                    int hourFromNow = filterInfo.getHourFromNow();
                    if (dateAllowRange != null) {
                        // Explicit [start, end] window takes precedence.
                        startTime = dateAllowRange[0];
                        endTime = dateAllowRange[1];
                    } else if (hourFromNow != 0) {
                        // Rolling window: the last hourFromNow hours up to "now".
                        endTime = System.currentTimeMillis();
                        startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
                    }
                }
            }
            if (startTime != null && releaseTimeToLong != 0 && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime) {
                isRange = true;
            }
        } else {
            // No date filter configured -> everything passes.
            isRange = true;
        }
        return isRange;
    }

    /**
     * Ad-hoc manual check for {@link #washTimeToLong(String)} with a sample
     * "yyyy年MM月dd日 HH:mm" timestamp.
     */
    public static void main(String[] args) {
        String sample = "2021年07月23日 10:48";
        try {
            System.out.println(washTimeToLong(sample));
        } catch (ParseException e) {
            // Log with the cause instead of printStackTrace so failures land in the normal log stream.
            log.error(e.getMessage(), e);
        }
    }
}
