package com.chance.cc.crawler.development.scripts.pcauto.newCar;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;

public class PcautoNewCarCrawlerScript extends CrawlerCommonScript {
    private Logger log = LoggerFactory.getLogger(PcautoNewCarCrawlerScript.class);// logger
    private static final String DOMAIN = "pcauto";
    private static final String SITE = "newCar";
    // Business tag that stores the retry count for failed downloads (see requestAgainCrawlerRecord).
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";


    //    Example: https://www.pcauto.com.cn/nation/ycxc/index_1.html
    private static final String NewCarRegulars = "https://www.pcauto.com.cn/nation/ycxc/\\S*";// article list pages
    //    https://pcauto.pcvideo.com.cn/video-64847.html
    //    https://www.pcauto.com.cn/nation/2514/25142931.html
    //    https://www.pcauto.com.cn/teach/2483/24839491.html
    //    https://www.geeknev.com/news/264/2641375.html
    //    https://mpcauto.pcvideo.com.cn/v_64448.html
    private static final String ArticleRegulars = "https://www.pcauto.com.cn/\\S*/\\d*/\\S*.html\\S*";// article detail pages
    private static final String ArticleTwoRegulars = "https://www.geeknev.com/news/\\S*/\\S*.html";// article detail pages (geeknev.com)
    private static final String VideoRegulars = "https://pcauto.pcvideo.com.cn/video-\\S*.html";// video detail pages
    private static final String VideoTwoRegulars = "https://mpcauto.pcvideo.com.cn/v_\\S*.html";// video detail pages (mpcauto mirror)

    //    Example: https://cmt.pcauto.com.cn/action/comment/list_good_json.jsp?url=https://pcauto.pcvideo.com.cn/video-64847.html&pageNo=3&pageSize=15
    private static final String CommentRegulars = "https://cmt.pcauto.com.cn/action/comment/list_good_json.jsp\\?url=\\S*&pageSize=15&pageNo=\\d*";// comment JSON endpoint

    // Protocol-relative variants as they appear in hrefs on the list pages.
    private static final String ArticleStrRegulars = "//www.pcauto.com.cn/\\S*/\\d*/\\d*.html";
    private static final String VideoStrRegulars = "//pcauto.pcvideo.com.cn/video-\\S*.html";

    @Override
    public String domain() {
        // Identifies which crawler domain this script serves; also used as the data-id prefix.
        return DOMAIN;
    }

    /**
     * Registers every URL pattern this crawler is allowed to follow.
     * Fixes a copy-paste bug: {@code ArticleRegulars} was registered twice while the
     * declared {@code ArticleTwoRegulars} (geeknev.com) and {@code VideoTwoRegulars}
     * (mpcauto mirror) patterns were never registered at all.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(NewCarRegulars);      // article list pages
        addUrlRegular(ArticleRegulars);     // article detail pages
        addUrlRegular(ArticleTwoRegulars);  // article detail pages (geeknev.com) — previously a duplicate of ArticleRegulars
        addUrlRegular(VideoRegulars);       // video detail pages
        addUrlRegular(VideoTwoRegulars);    // video detail pages (mpcauto mirror) — previously never registered
        addUrlRegular(CommentRegulars);     // comment JSON endpoint
        addUrlRegular(ArticleStrRegulars);  // protocol-relative article links from list pages
        addUrlRegular(VideoStrRegulars);    // protocol-relative video links from list pages
    }

    /**
     * Accepts only records whose business "site" tag matches this script's site ("newCar").
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        final String recordSite =
                crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        return SITE.equals(recordSite);
    }

    /**
     * Parses a downloaded page into follow-up requests: retries failed downloads,
     * then dispatches by URL pattern (list page / detail page / comment endpoint).
     *
     * Fixes: raw {@code ArrayList}; malformed SLF4J placeholder {@code {null}} that
     * dropped the URL from error logs; NPE when {@code getRawText()} is null after
     * a failed 404 download.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // Retry any failed download unless it is a hard 404.
        if (page.getStatusCode() != 200 || (!page.isDownloadSuccess())) {
            // was "url == {null}": the first placeholder was malformed, so the url never appeared in the log
            log.error("download page url == {} error status is {}", requestUrl, page.getStatusCode());
            if (page.getStatusCode() != 404) {
                this.requestAgainCrawlerRecord(parsedLinks, crawlerRecord);
                crawlerRecord.setNeedWashPage(false);
                return parsedLinks;
            }
        }

        // Soft 404: the site returns 200 with a "汽车网404" body; retry. Null-guard rawText
        // because a failed download can reach this point (404 falls through above).
        String rawText = page.getRawText();
        if (rawText != null && rawText.contains("汽车网404")) {
            log.error("download page url == {} error status is {}", requestUrl, "下载失败");
            this.requestAgainCrawlerRecord(parsedLinks, crawlerRecord);
            crawlerRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        if (requestUrl.matches(NewCarRegulars)) {
            articleRequest(crawlerRecord, page, parsedLinks);// article list page
        } else if (requestUrl.matches(ArticleRegulars) || requestUrl.matches(VideoRegulars)) {
            interactionRequest(crawlerRecord, page, parsedLinks);// detail page -> first comment-page request
        } else if (requestUrl.matches(CommentRegulars)) {
            commentRequest(crawlerRecord, page, parsedLinks);// comment pagination
        }
        return parsedLinks;
    }

    // Parses an article list page: emits a "next page" request plus one detail-page request per entry.
    private void articleRequest(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parsedLinks) {
        // Follow the "下一页" (next page) anchor when present; hrefs are protocol-relative.
        String nextStr = page.getHtml().xpath("//a[./text()=\"下一页\"]/@href").get();
        if (nextStr != null) {
            String nextPageUrl = "https:" + nextStr;
            CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            parsedLinks.add(turnPageRequest);

        }

//        Detail-entry parsing
        List<Selectable> nodes = page.getHtml().xpath("//div[@class=\"pic-txt clearfix\"]").nodes();// one node per listed article
        for (Selectable node : nodes) {
            try {
                String articleUrl = node.xpath(".//a[@class=\"img-area\"]/@href").get();// article url (may be protocol-relative)
                if (StringUtils.isBlank(articleUrl)) {
                    continue;
                }
                String commentUrl = "https://cmt.pcauto.com.cn/action/comment/list_good_json.jsp?url=" + articleUrl + "&pageSize=15&pageNo=1";
                if (articleUrl.matches(VideoStrRegulars)) {
                    // Protocol-relative video link: add the scheme to both the comment endpoint param and the detail url.
                    commentUrl = "https://cmt.pcauto.com.cn/action/comment/list_good_json.jsp?url=https:" + articleUrl + "&pageSize=15&pageNo=1";
                    articleUrl = "https:" + articleUrl;
                }
                if (articleUrl.matches(ArticleStrRegulars)) {
                    // Same scheme fix for protocol-relative article links.
                    commentUrl = "https://cmt.pcauto.com.cn/action/comment/list_good_json.jsp?url=https:" + articleUrl + "&pageSize=15&pageNo=1";
//                    articleUrl = "https:" + articleUrl.replaceAll(".html", "_all.html");
                    articleUrl = "https:" + articleUrl;
                }

                String pubTime = node.xpath(".//span[@class=\"time\"]/text()").get();// publish date, "yyyy-MM-dd"
                String authorName = node.xpath(".//span[@class=\"user\"]/a/text()").get();// author display name

                // NOTE: these tags are written onto the PARENT record before build(); copyBizTags()
                // below presumably snapshots them into the child request, so the order here matters
                // — TODO confirm copyBizTags() copies rather than shares.
                crawlerRecord.tagsCreator().bizTags().addCustomKV(AICCommonField.Field_Urls, articleUrl);
                crawlerRecord.tagsCreator().bizTags().addCustomKV("commentUrl", commentUrl);
                crawlerRecord.tagsCreator().bizTags().addCustomKV(AICCommonField.Field_Author, authorName);
                CrawlerRequestRecord articleRecord = CrawlerRequestRecord.builder()// detail-page request
                        .itemPageRequest(crawlerRecord)
                        .recordKey(articleUrl)
                        .httpUrl(articleUrl)
                        .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd").getTime())
                        .copyBizTags()
                        .copyResultTags()
                        .needWashed(true)
                        .build();
                articleRecord.tagsCreator().bizTags().addCustomKV("pubTime", pubTime);
                parsedLinks.add(articleRecord);

            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }
    }

    /**
     * Builds the first comment-page request for an article/video detail page.
     * For video pages, like/view counts are scraped here and attached as tags so
     * {@code articleInteractionWash} can read them later.
     *
     * Fixes: the two branches duplicated the entire builder chain (now built once);
     * a missing view-count node previously caused an NPE at {@code views.contains}.
     */
    private void interactionRequest(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parsedLinks) {
        String commentUrl = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("commentUrl");

        // Filter info forwarded from the originating record; restored onto the comment request below.
        KVTag filterInfoTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
        CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);

        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .recordKey(commentUrl)
                .httpUrl(commentUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .needWashed(true)
                .notFilterRecord()
                .build();

        if (page.getRequest().getUrl().matches(VideoRegulars)) {
            String likes = page.getHtml().xpath("//span[@class=\"vote-txt txt\"]/text()").get();
            String views = page.getHtml().xpath("//li[@class=\"last\"]//text()").get();
            // "1.2万" -> "12000.0"; null-guard added (missing node previously NPE'd).
            if (views != null && views.contains("万")) {
                views = Double.parseDouble(views.replaceAll("万", "")) * 10000 + "";
            }
            commentRecord.tagsCreator().bizTags().addCustomKV(AICCommonField.Field_I_Likes, likes);
            commentRecord.tagsCreator().bizTags().addCustomKV(AICCommonField.Field_I_Views, views);
        }

        commentRecord.setFilter(filterInfoRecord.getFilter());
        commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
        parsedLinks.add(commentRecord);
    }


    /**
     * Parses the comment-list JSON: records the total comment count as a business tag,
     * and enqueues the next comment page while more pages remain (15 comments per page).
     *
     * Fixes: a missing "total" field previously threw NumberFormatException (now guarded);
     * the total and page number were each parsed multiple times (now parsed once).
     */
    private void commentRequest(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parsedLinks) {
        String comments = new Json(page.getRawText()).jsonPath($_type + ".total").get();
        crawlerRecord.tagsCreator().bizTags().addCustomKV(AICCommonField.Field_I_Comments, comments);
        if (StringUtils.isBlank(comments)) {// guard: Integer.parseInt(null) used to throw here
            return;
        }
        int total = Integer.parseInt(comments);
        if (total > 0) {
            String commentUrl = page.getRequest().getUrl();
            String[] split = commentUrl.split("pageNo=");
            int pageNo = Integer.parseInt(split[1]);// CommentRegulars guarantees pageNo is the last query param
            String nextPageUrl = split[0] + "pageNo=" + (pageNo + 1);
            if (total - (15 * pageNo) > 0) {// more pages remain at 15 comments per page
                KVTag filterInfoTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRecord)
                        .recordKey(nextPageUrl)
                        .httpUrl(nextPageUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .copyResultTags()
                        .notFilterRecord()
                        .needWashed(true)
                        .build();
                commentRecord.setFilter(filterInfoRecord.getFilter());
                commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                parsedLinks.add(commentRecord);
            }
        }
    }

    /**
     * Routes a downloaded page to the appropriate wash methods based on its URL
     * pattern and the result data types requested on the record.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> washed = new ArrayList<>();
        // Soft-404 pages carry no usable content.
        if (page.getRawText().contains("汽车网404")) {
            return washed;
        }

        String url = crawlerRecord.getHttpRequest().getUrl();
        boolean wantsArticle = crawlerRecord.tagsCreator().resultTags().hasDataType(article);
        boolean wantsComment = crawlerRecord.tagsCreator().resultTags().hasDataType(comment);
        boolean wantsInteraction = crawlerRecord.tagsCreator().resultTags().hasDataType(interaction);

        if (url.matches(ArticleRegulars) && wantsArticle) {// article detail page
            washed.addAll(articleArticleWash(crawlerRecord, page));
            washed.addAll(articleListWash(crawlerRecord, page));
        }
        if (url.matches(VideoRegulars) && wantsArticle) {// video detail page
            washed.addAll(articleVideoWash(crawlerRecord, page));
            washed.addAll(articleListWash(crawlerRecord, page));
        }
        if (url.matches(CommentRegulars)) {// comment JSON endpoint
            if (wantsInteraction) {
                washed.addAll(articleInteractionWash(crawlerRecord, page));// article-level metrics
            }
            if (wantsComment) {
                washed.addAll(commentWash(crawlerRecord, page));// top-level comments
                washed.addAll(replyWash(crawlerRecord, page));// replies to comments
            }
            if (wantsInteraction) {
                washed.addAll(commentInteractionWash(crawlerRecord, page));// per-comment metrics
            }
        }
        return washed;
    }

    //清洗文章链接
    public List<CrawlerData> articleListWash(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        try {
            String articleUrl = page.getRequest().getUrl();//文章url
            String releaseTime = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("pubTime");
            String time = DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime() + "";//时间字符串
            String[] articleId = articleUrl.replaceAll("video-", "").replaceAll("_all.html", "").replaceAll(".html", "").split("/");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), article.enumVal(), articleId[articleId.length - 1]))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .url(articleUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime())
                    .addContentKV("releaseTimeToLong", time)
                    .addContentKV("articleUrl", articleUrl)
                    .addContentKV(AICCommonField.Field_Author, "文章")
                    .flowInPipelineTag("redis")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            crawlerArticleDataList.add(crawlerData);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
        return crawlerArticleDataList;
    }


    //  文章互动量清洗
    /**
     * Washes article-level interaction metrics into the kafka pipeline: the comment
     * total for every page, plus likes/views (captured in interactionRequest) for videos.
     *
     * Fixes: the two branches duplicated the try/catch and post-build steps (merged);
     * {@code replaceAll(".html", ...)} treated "." as a regex wildcard; the enum is
     * referenced directly instead of via {@code valueOf("interaction")}.
     */
    private List<CrawlerData> articleInteractionWash(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        String comments = page.getJson().jsonPath($_type + ".total").get();
        String articleUrl = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(AICCommonField.Field_Urls);
        String releaseTime = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("pubTime");
        // Trailing path segment (numeric id) keys both the data id and the parent article id.
        String[] articleId = articleUrl.replace("video-", "").replace("_all.html", "").replace(".html", "").split("/");

        try {
            CrawlerData crawlerData;
            if (articleUrl.matches(VideoRegulars)) {
                // Video pages also carry likes/views tags set earlier in interactionRequest().
                String likes = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(AICCommonField.Field_I_Likes);
                String views = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(AICCommonField.Field_I_Views);
                crawlerData = CrawlerData.builder()
                        .data(crawlerRecord, page)
                        .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), interaction.enumVal(), articleId[articleId.length - 1]))
                        .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), article.enumVal(), articleId[articleId.length - 1]))
                        .resultLabelTag(interaction)
                        .url(articleUrl)
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime())
                        .addContentKV(AICCommonField.Field_I_Comments, comments)
                        .addContentKV(AICCommonField.Field_I_Likes, likes)
                        .addContentKV(AICCommonField.Field_I_Views, views)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .flowInPipelineTag("kafka")
                        .build();
            } else {
                crawlerData = CrawlerData.builder()
                        .data(crawlerRecord, page)
                        .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), interaction.enumVal(), articleId[articleId.length - 1]))
                        .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), article.enumVal(), articleId[articleId.length - 1]))
                        .resultLabelTag(interaction)
                        .url(articleUrl)
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime())
                        .addContentKV(AICCommonField.Field_I_Comments, comments)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .flowInPipelineTag("kafka")
                        .build();
            }
            crawlerData.setFilterPipelineResult(true);
            crawlerArticleDataList.add(crawlerData);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
        return crawlerArticleDataList;
    }


    //解析文章
    /**
     * Washes an article detail page (title, author, body text, images, tags) into
     * an article CrawlerData entry routed to the kafka pipeline.
     *
     * Fixes: a missing author node previously threw an NPE that silently dropped the
     * whole article (now defaults authorId to ""); {@code StringBuffer} replaced with
     * {@code StringBuilder} (no synchronization needed); {@code replaceAll(".html", ...)}
     * treated "." as a regex wildcard; enum referenced directly instead of valueOf.
     */
    public List<CrawlerData> articleArticleWash(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        try {
            String articleUrl = page.getRequest().getUrl();// article url
            String title = page.getHtml().xpath("//h1/text()").get();// title
            String authorName = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(AICCommonField.Field_Author);
            String adminUrl = page.getHtml().xpath("//span[@id=\"author_baidu\"]/a/@href").get();// author profile url
            // Author id is the trailing segment of the profile url; guard against a missing node.
            String authorId = "";
            if (adminUrl != null) {
                String[] split = adminUrl.replaceAll("userId=", "/").replaceAll("#ad=4219", "").split("/");
                authorId = split[split.length - 1];
            }
            String releaseTime = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("pubTime");

            List<String> tag = page.getHtml().xpath("//p[@class=\"moreRead artTag\"]/a").all();// tags
            List<String> texts = page.getHtml().xpath("//div[@class=\"artText clearfix\"]//text()").all();// body text fragments
            List<String> picture = page.getHtml().xpath("//div[@class=\"artText clearfix\"]//img/@src").all();// image urls
            StringBuilder conents = new StringBuilder();// join body fragments with spaces
            for (String text : texts) {
                conents.append(text).append(" ");
            }
            StringBuilder pictures = new StringBuilder();// join image urls
            for (String text : picture) {
                // NOTE(review): "\0x1" is a NUL char followed by "x1"; possibly meant \u0001 —
                // confirm with the downstream consumer before changing.
                pictures.append(text).append("\0x1");
            }
            StringBuilder tags = new StringBuilder();// join tags with spaces
            for (String text : tag) {
                tags.append(text).append(" ");
            }

            // Trailing path segment (numeric id) doubles as the data id.
            String[] articleId = articleUrl.replace("video-", "").replace("_all.html", "").replace(".html", "").split("/");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), article.enumVal(), articleId[articleId.length - 1]))
                    .resultLabelTag(article)
                    .url(articleUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime())
                    .addContentKV(AICCommonField.Field_Content, conents.toString().trim())
                    .addContentKV(AICCommonField.Field_Title, title)
                    .addContentKV(AICCommonField.Field_Author, authorName)
                    .addContentKV(AICCommonField.Field_Author_Id, authorId)
                    .addContentKV(AICCommonField.Field_Images, pictures.toString().trim())
                    .addContentKV(AutoVMCommonField.Tag_Field_Topic_Type, tags.toString().trim())
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            crawlerArticleDataList.add(crawlerData);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
        return crawlerArticleDataList;
    }

    //解析文章
    public List<CrawlerData> articleVideoWash(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        try {
            String articleUrl = page.getRequest().getUrl();//文章url
            String title = page.getHtml().xpath("//h1/text()").get();//标题
            String authorName = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(AICCommonField.Field_Author);
            String releaseTime = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("pubTime");

            List<String> tag = page.getHtml().xpath("//dl[@class=\"tags\"]/dd/a/text()").all();//标签
            List<String> texts = page.getHtml().xpath("//dl[@class=\"summary\"]/dd/text()").all();//文章文本
            StringBuffer conents = new StringBuffer();//将文本拼接
            for (String text : texts) {
                conents.append(text).append(" ");
            }
            StringBuffer tags = new StringBuffer();//拼接标签
            for (String text : tag) {
                tags.append(text).append(" ");
            }

            String[] articleId = articleUrl.replaceAll("video-", "").replaceAll("_all.html", "").replaceAll(".html", "").split("/");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), article.enumVal(), articleId[articleId.length - 1]))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .url(articleUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime())
                    .addContentKV(AICCommonField.Field_Content, conents.toString().trim())
                    .addContentKV(AICCommonField.Field_Title, title)
                    .addContentKV(AICCommonField.Field_Author, authorName)
                    .addContentKV(AutoVMCommonField.Tag_Field_Topic_Type, tags.toString().trim())
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            crawlerArticleDataList.add(crawlerData);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
        return crawlerArticleDataList;
    }

    //  评论清洗
    /**
     * Washes top-level comments from the comment-list JSON into comment CrawlerData
     * entries parented to the article.
     *
     * Fixes: {@code new Json(node.get())} was constructed six times per node (now once);
     * {@code replaceAll(".html", ...)} treated "." as a regex wildcard; enum referenced
     * directly instead of {@code valueOf("comment")}.
     */
    private Collection<? extends CrawlerData> commentWash(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();

        List<Selectable> nodes = page.getJson().jsonPath($_type + ".data[*]").nodes();
        for (Selectable node : nodes) {
            try {
                Json nodeJson = new Json(node.get());// parse each node once instead of per-field
                String content = nodeJson.jsonPath(".content").get();// comment body
                String time = nodeJson.jsonPath(".createTime").get();// "yyyy-MM-dd HH:mm:ss"
                String user = nodeJson.jsonPath(".nickName").get();// commenter nickname
                String userId = nodeJson.jsonPath(".userId").get();// commenter id
                String commentId = nodeJson.jsonPath(".id").get();// comment id
                String floor = nodeJson.jsonPath(".floor").get();// floor number

                String articleUrl = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(AICCommonField.Field_Urls);// article url
                String[] articleId = articleUrl.replace("video-", "").replace("_all.html", "").replace(".html", "").split("/");

                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRecord, page)
                        .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), comment.enumVal(), commentId))
                        .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), article.enumVal(), articleId[articleId.length - 1]))
                        .resultLabelTag(comment)
                        .url(articleUrl)
                        .releaseTime(DateUtils.parseDate(time, "yyyy-MM-dd HH:mm:ss").getTime())
                        .addContentKV(AICCommonField.Field_Content, content)
                        .addContentKV(AICCommonField.Field_Author, user)
                        .addContentKV(AICCommonField.Field_Author_Id, userId)
                        .addContentKV(AICCommonField.Field_Floor, floor)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .flowInPipelineTag("kafka")
                        .build();
                crawlerData.setFilterPipelineResult(true);
                crawlerArticleDataList.add(crawlerData);
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }

        return crawlerArticleDataList;
    }

    //  评论互动量
    /**
     * Washes per-comment interaction metrics (like count) from the comment-list JSON.
     *
     * Fixes: {@code new Json(node.get())} was constructed four times per node (now once);
     * enum referenced directly instead of {@code valueOf("comment")}.
     */
    private Collection<? extends CrawlerData> commentInteractionWash(CrawlerRequestRecord crawlerRecord, HttpPage
            page) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        List<Selectable> nodes = page.getJson().jsonPath($_type + ".data[*]").nodes();// all comments on this page
        for (Selectable node : nodes) {
            try {
                Json nodeJson = new Json(node.get());// parse each node once instead of per-field
                String time = nodeJson.jsonPath(".createTime").get();// "yyyy-MM-dd HH:mm:ss"
                String commentId = nodeJson.jsonPath(".id").get();// comment id
                String like = nodeJson.jsonPath(".support").get();// like count

                String articleUrl = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(AICCommonField.Field_Urls);// article url
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRecord, page)
                        .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), interaction.enumVal(), commentId))
                        .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), comment.enumVal(), commentId))
                        // NOTE(review): the dataId uses the interaction prefix but the label is
                        // "comment" — looks inconsistent with articleInteractionWash; confirm intent.
                        .resultLabelTag(comment)
                        .url(articleUrl)
                        .releaseTime(DateUtils.parseDate(time, "yyyy-MM-dd HH:mm:ss").getTime())
                        .addContentKV(AICCommonField.Field_I_Likes, like)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .flowInPipelineTag("kafka")
                        .build();
                crawlerData.setFilterPipelineResult(true);
                crawlerArticleDataList.add(crawlerData);
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }
        return crawlerArticleDataList;
    }

    //  评论回复
    /**
     * Washes reply comments (entries carrying a {@code replyRef} to an original comment)
     * into comment CrawlerData entries combining reply and original text.
     *
     * Fixes: {@code new Json(node.get())} was constructed up to nine times per node
     * (now once); {@code replaceAll(".html", ...)} treated "." as a regex wildcard;
     * enum referenced directly instead of {@code valueOf("comment")}.
     */
    private Collection<? extends CrawlerData> replyWash(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        List<Selectable> nodes = page.getJson().jsonPath($_type + ".data[*]").nodes();
        for (Selectable node : nodes) {
            Json nodeJson = new Json(node.get());// parse each node once instead of per-field
            String userId2 = nodeJson.jsonPath(".replyRef.userId").get();// original commenter id; null means "not a reply"
            if (userId2 != null) {
                try {
                    String content1 = nodeJson.jsonPath(".content").get();// reply body
                    String time = nodeJson.jsonPath(".createTime").get();// reply time
                    String user1 = nodeJson.jsonPath(".nickName").get();// replier nickname
                    String userId1 = nodeJson.jsonPath(".userId").get();// replier id
                    String commentId1 = nodeJson.jsonPath(".id").get();// reply comment id
                    String commentId2 = nodeJson.jsonPath(".replyRef.id").get();// original comment id
                    String commentId = commentId1 + commentId2;// composite id keeps replies distinct from the top-level wash
                    String content2 = nodeJson.jsonPath(".replyRef.content").get();// original comment body
                    String user2 = nodeJson.jsonPath(".replyRef.nickName").get();// original commenter nickname
                    String content = "回复：" + content1 + "原评论：" + content2;
                    String user = "回复：" + user1 + "原评论：" + user2;
                    String userId = "回复：" + userId1 + "原评论：" + userId2;
                    String articleUrl = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(AICCommonField.Field_Urls);// article url
                    String[] articleId = articleUrl.replace("video-", "").replace("_all.html", "").replace(".html", "").split("/");

                    CrawlerData crawlerData = CrawlerData.builder()
                            .data(crawlerRecord, page)
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), comment.enumVal(), commentId))
                            .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), article.enumVal(), articleId[articleId.length - 1]))
                            .resultLabelTag(comment)
                            .url(articleUrl)
                            .releaseTime(DateUtils.parseDate(time, "yyyy-MM-dd HH:mm:ss").getTime())
                            .addContentKV(AICCommonField.Field_Content, content)
                            .addContentKV(AICCommonField.Field_Author, user)
                            .addContentKV(AICCommonField.Field_Author_Id, userId)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .flowInPipelineTag("kafka")
                            .build();
                    crawlerData.setFilterPipelineResult(true);
                    crawlerArticleDataList.add(crawlerData);
                } catch (Exception e) {
                    log.error(e.getMessage(), e);
                }
            }
        }
        return crawlerArticleDataList;
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // Intentionally empty: this script needs no post-execution cleanup.
    }

    /**
     * Re-enqueues a failed download, tracking the retry count in the
     * {@code REQUEST_AGAIN_TAG} business tag and giving up after 30 attempts.
     * The rebuilt request preserves the original's kind (turn-page vs item-page),
     * headers, extras, and wash/parse flags.
     *
     * Fixes: the give-up log message said "autohome" — this is the pcauto script.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 30) {// hard cap on retries
                log.error("pcauto download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        CrawlerRequestRecord crawlerRequestRecord = null;
        // Rebuild the request as the same kind as the original (turn-page vs item-page).
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)// count suffix keeps the retry's key unique
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // Carry over the original request's processing flags and HTTP state, then bump the retry count.
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }


}
