package com.chance.cc.crawler.development.scripts.tenxun.qqcarnews;

import com.alibaba.fastjson.JSON;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import com.jayway.jsonpath.PathNotFoundException;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

public class QQCarCrawlerScript extends CrawlerCommonScript {
    // Crawler domain identifier returned by domain().
    private static final String DOMAIN = "tenxun";
    // "site" tag value this script accepts in crawlerCheck().
    private static final String SCRIPTSITE = "car";
    private static final String DETAIL_URL = "https://auto.qq.com/a/\\d{8}/\\d{6}\\.htm";// article detail page regex
    private static final String STARTURL = "https://auto.qq.com/newcar.htm";
    private static final String LISTURLPREFIX = "https://auto.qq.com/\\w+.htm";// Tencent Auto section/home list pages
    private static final String COMMIT_URL = "https://coral.qq.com/\\d{10}";// coral comment-service id regex
    // Regex for the comment-count ("commentnum") JSONP endpoint.
    private static final String COMMIT_NUM = "https://coral.qq.com/article/\\d{10}/commentnum\\?callback=_article\\d{10}commentnum&_=\\d{13}";
    // Regex for the first page of an article's comment list (cursor=0).
    private static final String COMMIT_REPLAY = "https://coral.qq.com/article/\\d{10}/comment/v2\\?callback=_article\\d{10}commentv2&orinum=10&oriorder=[o|t]&pageflag=1&cursor=0&scorecursor=0&orirepnum=2&reporder=o&reppageflag=1&source=1&_=\\d{13}";
    // Regex for subsequent comment-list pages (19-digit pagination cursor).
    private static final String COMMENT_REPLAY_MORE = "https://coral.qq.com/article/\\d{10}/comment/v2\\?callback=_article\\d{10}commentv2&orinum=10&oriorder=t&pageflag=1&cursor=\\d{19}&scorecursor=0&orirepnum=2&reporder=o&reppageflag=1&source=1&_=\\d{13}";
    // Regex for the replies of a single comment ("reply/v2").
    private static final String COMMENT_REPLAY_MORE_NEXT = "https://coral.qq.com/comment/\\d{19}/reply/v2\\?callback=_comment\\d{19}replyv2&targetid=\\d{10}&reqnum=10&pageflag=2&source=1&_=\\d{13}";
    // Format string for the first comment-list page (args: coral id, coral id, timestamp).
    private static final String commentInfoFormat = "https://coral.qq.com/article/%s/comment/v2?callback=_article%scommentv2&orinum=10&oriorder=t&pageflag=1&cursor=0&scorecursor=0&orirepnum=2&reporder=o&reppageflag=1&source=1&_=%s";
    // Format string for the comment-count URL (args: coral id, coral id, timestamp).
    private static final String commentNumFormat = "https://coral.qq.com/article/%s/commentnum?callback=_article%scommentnum&_=%s";
    // Format string for later comment pages (args: coral id, coral id, cursor, timestamp).
    private static final String commentLastFormat = "https://coral.qq.com/article/%s/comment/v2?callback=_article%scommentv2&orinum=10&oriorder=t&pageflag=1&cursor=%s&scorecursor=0&orirepnum=2&reporder=o&reppageflag=1&source=1&_=%s";
    // Format string for one comment's replies (args: comment id, comment id, target id, timestamp).
    private static final String commentNextFormat = "https://coral.qq.com/comment/%s/reply/v2?callback=_comment%sreplyv2&targetid=%s&reqnum=10&pageflag=2&source=1&_=%s";
    private static final Logger logger = LoggerFactory.getLogger(QQCarCrawlerScript.class);

    /** @return the crawler domain identifier handled by this script. */
    @Override
    public String domain() {
        return QQCarCrawlerScript.DOMAIN;
    }
    /**
     * Registers every URL pattern this script accepts: the start page,
     * section list pages, article detail pages, and the coral comment
     * endpoints (count, first page, pagination, per-comment replies).
     */
    @Override
    public void initUrlRegulars() {
        String[] patterns = {
                STARTURL,
                LISTURLPREFIX,
                DETAIL_URL,
                COMMIT_URL,
                COMMIT_NUM,
                COMMIT_REPLAY,
                COMMENT_REPLAY_MORE,
                COMMENT_REPLAY_MORE_NEXT,
        };
        // Registration order matches the original call sequence.
        for (String pattern : patterns) {
            addUrlRegular(pattern);
        }
    }

    /**
     * Input gate: only records whose business category "site" tag matches this
     * script's site ({@code "car"}) are processed.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return true when the record's "site" tag equals {@link #SCRIPTSITE}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // Compare constant-first so a record without a "site" tag (null value)
        // is rejected instead of throwing a NullPointerException.
        return SCRIPTSITE.equalsIgnoreCase(crawlerSite);
    }

    /**
     * Post-execution hook required by the base class; this script needs no
     * cleanup, so the body is intentionally empty.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Link-extraction entry point. Routes the downloaded page to the matching
     * parser based on which URL pattern the request matches: section list
     * pages fan out to per-module list pages plus article detail links,
     * article pages produce coral comment/interaction requests, and comment
     * pages paginate further comment requests.
     *
     * @param crawlerRequestRecord the request that produced {@code httpPage}
     * @param httpPage             the downloaded page
     * @return the follow-up requests discovered on this page (possibly empty)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<CrawlerRequestRecord>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        // Failed downloads produce no links and are excluded from washing.
        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() != 200) {
            logger.error("{} status code : [{}]", requestUrl, httpPage.getStatusCode());
            crawlerRequestRecord.setNeedWashPage(false);
            return parseLinks;
        }

        if (requestUrl.matches(LISTURLPREFIX)) {
            // Fan out to each configured module's list page; "moduleTags" is
            // expected to hold a list of module names (e.g. "newcar", "tech").
            List<String> modules = (List<String>) crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("moduleTags").getVal();
            for (String module : modules) {
                String moduleUrl = String.format("https://auto.qq.com/%s.htm", module);

                CrawlerRequestRecord moduleRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(moduleUrl)
                        .httpUrl(moduleUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .copyResultTags()
                        .needWashed(false)
                        .build();
                parseLinks.add(moduleRecord);
            }
            detailRecord(crawlerRequestRecord, httpPage, parseLinks);
        } else if (requestUrl.matches(DETAIL_URL)) {
            commentRecord(crawlerRequestRecord, httpPage, parseLinks);
        } else if (requestUrl.matches(COMMIT_REPLAY) || requestUrl.matches(COMMENT_REPLAY_MORE)) {
            // First comment page and later pages paginate identically; the
            // original duplicated this call across two separate branches.
            commentMoreRecord(crawlerRequestRecord, httpPage, parseLinks);
        }
        return parseLinks;
    }

    /**
     * Parses a section list page: extracts each article's link and release
     * date and appends one detail-page request per article. The module name is
     * taken from the list page's URL path segment and forwarded so
     * washArticle() can pick the right author-extraction strategy.
     *
     * @param crawlerRequestRecord the list-page request
     * @param httpPage             the downloaded list page
     * @param parseLinks           output list the new requests are appended to
     */
    public void detailRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinks) {
        List<Selectable> nodes = httpPage.getHtml().xpath("//*[@id=\"LIST_LM\"]/li/div[@class=\"newTxt\"]").nodes();
        // e.g. "https://auto.qq.com/newcar.htm" -> path segment "newcar.htm" -> module "newcar"
        String url = httpPage.getRequest().getUrl().split("/")[3];
        String module = url.substring(0, url.lastIndexOf("."));
        for (Selectable node : nodes) {
            // Check the raw href before prefixing: the original concatenated
            // first, so its isEmpty() check could never fire
            // ("https://auto.qq.com" + null is never empty).
            String href = node.xpath("./h3/a/@href").get();
            if (StringUtils.isEmpty(href)) continue;
            String itemUrl = "https://auto.qq.com" + href;

            String itemTime = node.xpath("./div/h5/text()").get();
            if (StringUtils.isEmpty(itemTime)) continue;
            try {
                long releaseTime = DateUtils.parseDate(itemTime, "yyyy年MM月dd日").getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTime)
                        .copyResultTags()
                        .copyBizTags()
                        .build();
                itemRecord.getHttpRequest().addExtra("module", module);
                itemRecord.getHttpRequest().addExtra("isWashArticle", "yes");// marks this request for article washing
                parseLinks.add(itemRecord);
            } catch (ParseException e) {
                // Skip items whose date does not match the expected format.
                logger.error(e.getMessage(), e);
            }
        }
    }

    /**
     * Parses an article detail page into two coral follow-up requests: one for
     * the article's total comment count (interaction) and one for the first
     * page of its comment list. The numeric coral id is scraped from the
     * page's inline "cmt_id" script variable. Aborts the comment request when
     * no "comment_record_filter_info" tag is present.
     *
     * @param crawlerRequestRecord the article-page request
     * @param httpPage             the downloaded article page
     * @param parseLinks           output list the new requests are appended to
     */
    public void commentRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinks) {
        // Build the comment-count (interaction) request first.

        String articleUrl = httpPage.getRequest().getUrl();
        List<String> all = httpPage.getHtml().all();
        // Scrape the numeric coral id from the "cmt_id = NNN;" fragment
        // (offset 8 skips past "cmt_id = "; trailing ";" split off).
        String cNum = all.get(0).substring(all.get(0).indexOf("cmt_id") + 8).trim().split(";")[0];
        String commentNumUrl = String.format(commentNumFormat, cNum, cNum, System.currentTimeMillis());

        CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .recordKey(commentNumUrl)
                .httpUrl(commentNumUrl)
                .releaseTime(System.currentTimeMillis())
                .needWashed(true)
                .copyBizTags()
                .copyResultTags()
                .build();
        // "articleUrl" carries the article key (filename without extension).
        interactionRecord.getHttpRequest().addExtra("articleUrl", articleUrl.substring(articleUrl.lastIndexOf("/") + 1).split("\\.")[0]);
        interactionRecord.getHttpRequest().addExtra("isWashInteraction","yes");// marks this request for interaction washing
        parseLinks.add(interactionRecord);

        // Check whether the record carries the comment filter condition.
        if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
            logger.error("QQCar crawler comment need to filter information!");
            return;
        }
        // NOTE(review): existence is checked on bizTags() above but the value
        // is read from getCategoryTag() here — confirm both address the same
        // tag store.
        KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
        CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);

        String commentInfoUrl = String.format(commentInfoFormat, cNum, cNum, System.currentTimeMillis());
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .recordKey(commentInfoUrl)
                .httpUrl(commentInfoUrl)
                .releaseTime(System.currentTimeMillis())
                .needWashed(true)
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .build();
        // Propagate the deserialized filter settings to the comment request.
        commentRecord.setFilter(filterInfoRecord.getFilter());
        commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
        crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().removeLabelTag("comment");

        commentRecord.getHttpRequest().addExtra("articleUrl", articleUrl.substring(articleUrl.lastIndexOf("/") + 1).split("\\.")[0]);
        commentRecord.getHttpRequest().addExtra("isWashComment","yes");// marks this request for comment washing
        parseLinks.add(commentRecord);
    }

    /**
     * Parses a coral comment-list JSONP response and schedules follow-up
     * requests: the next comment page when a full page (>= 10 comments) was
     * returned, plus one "reply/v2" request for every comment that has
     * replies. Aborts when no "comment_record_filter_info" tag is present.
     *
     * @param crawlerRequestRecord the comment-page request
     * @param httpPage             the downloaded JSONP comment page
     * @param parseLinks           output list the new requests are appended to
     */
    public void commentMoreRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinks) {
        // Parses the comment-list JSON for further comment/reply pages.

        // Check whether the record carries the comment filter condition.
        if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
            logger.error("QQCar crawler comment need to filter information!");
            return;
        }
        // NOTE(review): existence is checked on bizTags() but the value is
        // read from getCategoryTag() — confirm both address the same tag store.
        KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
        CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);

        String commentNumUrl = httpPage.getRequest().getUrl();
        // Coral id is the 5th path segment of the request URL.
        String commentKey = commentNumUrl.split("/")[4];
        // Strip the JSONP callback wrapper before reading JSON paths.
        Json json = httpPage.getJson().removePadding("_article" + commentKey + "commentv2");
        String data_last = null;
        // "targetInfo.articleid" is prefixed; substring(8) drops the prefix.
        String articleId = json.jsonPath($_type+".data.targetInfo.articleid").get().substring(8);
        int count = json.jsonPath($_type + ".data.oriCommList[*].content").all().size();
        if (count >= 10) {
            // A full page implies more comments: request the next page using
            // the "last" cursor returned by coral.
            data_last = json.jsonPath($_type + ".data.last").get();
            String commentLastUrl = String.format(commentLastFormat, commentKey, commentKey, data_last, System.currentTimeMillis());
            CrawlerRequestRecord commentNumRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(commentLastUrl)
                    .httpUrl(commentLastUrl)
                    .releaseTime(System.currentTimeMillis())
                    .needWashed(true)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            commentNumRecord.getHttpRequest().addExtra("articleUrl", articleId);
            commentNumRecord.getHttpRequest().addExtra("isWashComment","yes");// marks comment-of-comment pages for washing
            // Propagate the deserialized filter settings.
            commentNumRecord.setFilter(filterInfoRecord.getFilter());
            commentNumRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
            crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().removeLabelTag("comment");
            parseLinks.add(commentNumRecord);
        }
        // Build "view more replies" URLs for each comment that has replies.
        for(int i = 0;i<count;i++){
            String oriReplyNumStr = json.jsonPath($_type+".data.oriCommList["+i+"].orireplynum").get();
            Integer oriReplyNum = Integer.valueOf(oriReplyNumStr);
            if(oriReplyNum>0) {
                String id = json.jsonPath($_type + ".data.oriCommList[" + i + "].id").get();
                String targetId = json.jsonPath($_type + ".data.oriCommList[" + i + "].targetid").get();
                String commentNextUrl = String.format(commentNextFormat, id, id, targetId, System.currentTimeMillis());
                CrawlerRequestRecord commentNextRecored = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(commentNextUrl)
                        .httpUrl(commentNextUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                commentNextRecored.getHttpRequest().addExtra("articleUrl", articleId);
                commentNextRecored.getHttpRequest().addExtra("isWashComment","yes");// marks the "more replies" page for washing
                commentNextRecored.getHttpRequest().addExtra("replays","replays");// marks a reply-of-reply page; read in washComments()

                // Propagate the deserialized filter settings.
                commentNextRecored.setFilter(filterInfoRecord.getFilter());
                commentNextRecored.setFilterInfos(filterInfoRecord.getFilterInfos());
                crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().removeLabelTag("comment");

                parseLinks.add(commentNextRecored);
            }
        }

    }

    /**
     * Wash dispatcher: inspects the result tags plus the request extras set in
     * detailRecord()/commentRecord()/commentMoreRecord() and runs the matching
     * wash methods. The checks are independent, so one page may contribute
     * several kinds of data.
     *
     * @param crawlerRequestRecord the washed request
     * @param httpPage             the downloaded page
     * @return the washed data items (possibly empty)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<CrawlerData>();
        CrawlerResultTags crawlerResultTags = crawlerRequestRecord.tagsCreator().resultTags();
        Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();

        if (crawlerResultTags.hasDataType(article) && extras.get("isWashArticle") != null) {
            CrawlerData articleData = washArticle(crawlerRequestRecord, httpPage);
            // washArticle returns null when the release time is unparseable;
            // never hand a null element to the pipeline.
            if (articleData != null) {
                crawlerDataList.add(articleData);
            }
        }
        if (crawlerResultTags.hasDataType(comment) && extras.get("isWashComment") != null) {
            crawlerDataList.addAll(washComments(crawlerRequestRecord, httpPage));
        }
        if (crawlerResultTags.hasDataType(interaction) && extras.get("isWashInteraction") != null) {
            crawlerDataList.add(washInteraction(crawlerRequestRecord, httpPage));
        }
        return crawlerDataList;
    }

    /**
     * Washes an article detail page into a CrawlerData item: content
     * paragraphs, title, author and image URLs. The author-extraction strategy
     * depends on the module the link came from (see detailRecord()).
     *
     * @param crawlerRequestRecord the article request (must carry a "module" extra)
     * @param httpPage             the downloaded article page
     * @return the article data, or null when the release time is missing or unparseable
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<String> texts = httpPage.getHtml().xpath("//*[@id=\"Cnt-Main-Article-QQ\"]/p[@class=\"text\"]/text()").all();
        String releaseTimeStr = httpPage.getHtml().xpath("//*/span[@class=\"a_time\"]").get();
        if (StringUtils.isEmpty(releaseTimeStr)) {
            releaseTimeStr = httpPage.getHtml().xpath("//*/span[@class=\"article-time\"]").get();
        }
        if (StringUtils.isEmpty(releaseTimeStr)) {
            // Neither time element exists; parseDate would throw an unchecked
            // exception on null, so bail out explicitly instead.
            logger.error("release time not found for {}", httpPage.getRequest().getUrl());
            return null;
        }

        StringBuilder contents = new StringBuilder();
        for (String text : texts) {
            contents.append(text).append(" ");
        }
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];
        String module = crawlerRequestRecord.getHttpRequest().getExtras().get("module").toString();
        // Each module places the author in a different element.
        String author;
        if (module.contains("guide") || module.contains("tech")) {
            author = httpPage.getHtml().xpath("//*[@class=\"a_author\"]").get();
            if (StringUtils.isEmpty(author)) {
                author = authorFromSource(httpPage);
            }
        } else if (module.contains("newcar")) {
            author = httpPage.getHtml().xpath("//*[@class=\"a_author\"]").get();
            if (StringUtils.isEmpty(author)) {
                author = httpPage.getHtml().xpath("//*[@class=\"color-a-1\"]/a/text()").get();
            }
            if (StringUtils.isEmpty(author)) {
                author = authorFromSource(httpPage);
            }
        } else if (module.contains("evaluat")) {
            author = httpPage.getHtml().xpath("//*[@class=\"a_source\"]").get();
            // Null-guard added: the original dereferenced the xpath result directly.
            if (author != null && author.indexOf("</a>") != -1) {
                String inner = author.split(">")[1];
                author = inner.substring(0, inner.indexOf("<"));
            }
        } else { // news and any other module
            author = httpPage.getHtml().xpath("//*[@class=\"color-a-1\"]/a/text()").get();
        }

        // Collect image URLs, protocol-prefixed and joined with the literal
        // "\x01" separator expected downstream.
        List<String> srcs = httpPage.getHtml().xpath("//*[@id=\"Cnt-Main-Article-QQ\"]/p/img/@src").all();
        StringBuilder srcStr = new StringBuilder();
        for (String src : srcs) {
            srcStr.append("https:").append(src).append("\\x01");
        }
        try {
            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .releaseTime(DateUtils.parseDate(releaseTimeStr, "yyyy-MM-dd HH:mm").getTime())
                    .addContentKV(Field_Content, contents.toString().trim())
                    .addContentKV(Field_Title, httpPage.getHtml().xpath("//*[@id=\"Main-Article-QQ\"]/div[1]/div[1]/div[2]/div[1]/h1").get())
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Images, srcStr.toString())
                    .resultLabelTag(article)
                    .build();
        } catch (ParseException e) {
            logger.error(e.getMessage(), e);
        }
        return null;
    }

    /**
     * Shared fallback for the "a_source" author element, previously duplicated
     * in the guide/tech and newcar branches: returns the inner text of an
     * embedded anchor when present, otherwise the text after the '：'
     * separator; null when the element is absent (the original would have
     * thrown a NullPointerException in that case).
     */
    private String authorFromSource(HttpPage httpPage) {
        String source = httpPage.getHtml().xpath("//*[@class=\"a_source\"]").get();
        if (source == null) {
            return null;
        }
        if (source.indexOf("</a>") != -1) {
            String inner = source.split(">")[1];
            return inner.substring(0, inner.indexOf("<"));
        }
        return source.substring(source.indexOf("：") + 1);
    }

    /**
     * Washes a coral JSONP comment response into CrawlerData items. Two
     * response shapes are handled, selected by the "replays" request extra set
     * in commentMoreRecord(): reply-of-reply pages ("data.repCommList[k]"
     * indexed by position) and ordinary comment pages ("data.oriCommList" with
     * replies keyed by parent comment id in "data.repCommList.<id>[k]").
     * Every item is parented to the article via the "articleUrl" extra.
     *
     * @param crawlerRequestRecord the comment-page request
     * @param httpPage             the downloaded JSONP comment page
     * @return the washed comment data items (possibly empty)
     */
    public List<CrawlerData> washComments(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String articleKey = crawlerRequestRecord.getHttpRequest().getExtras().get("articleUrl").toString();
        String commentT = httpPage.getJson().get();
        String commentContent = null;
        String userId = null;
        String likes = null;
        String reComms = null;
        Long commentTimeL = null;
        // Strip the JSONP callback wrapper: keep what is inside "(...)".
        Json commentTJson = new Json(commentT.substring(commentT.indexOf("(") + 1, commentT.lastIndexOf(")")));
        int count = 0;
        List<CrawlerData> crawlerDatas = new ArrayList<>();

        Object replays = crawlerRequestRecord.getHttpRequest().getExtras().get("replays");
        if(replays != null){
            // Reply-of-reply page: repCommList is a flat array here.
            count = Integer.parseInt(commentTJson.jsonPath($_type + ".data.oriComment.repnum").get());// loop count
            for (int k = 0; k < count; k++) {
                String reContent = null;
                try {
                    reContent = commentTJson.jsonPath($_type + ".data.repCommList[" + k + "]" + ".content").get();
                }catch(PathNotFoundException e){
                    // repnum can exceed the entries actually returned; stop at the end.
                    break;
                }
                // Coral times are epoch seconds; appending "000" converts to millis.
                Long reTime = Long.parseLong(commentTJson.jsonPath($_type + ".data.repCommList[" + k + "].time").get() + "000");
                String reUserid = commentTJson.jsonPath($_type + ".data.repCommList[" + k + "].userid").get();// user id
                String reId = commentTJson.jsonPath($_type+".data.repCommList["+k+"].id").get();// reply id
                String likes2 = commentTJson.jsonPath($_type+".data.repCommList["+k+"].up").get();// like count of the reply's reply
                String reComm2 = commentTJson.jsonPath($_type+".data.repCommList["+k+"].repnum").get();// reply count of the reply's reply
                CrawlerData reCrawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerDataType.comment.enumVal(), reId))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerDataType.article.enumVal(), articleKey))
                        .releaseTime(reTime.longValue())
                        .addContentKV(Field_Author_Id, reUserid)
                        .addContentKV(Field_Content, reContent)
                        .addContentKV(Field_I_Likes,likes2)
                        .addContentKV(Field_I_Comments,reComm2)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .resultLabelTag(comment)
                        .build();
                crawlerDatas.add(reCrawlerData);
            }
        }else{
            // Ordinary comment page: iterate the top-level comment list.
            count = commentTJson.jsonPath($_type + ".data.oriCommList[*].content").all().size();// loop count
            int countRep = 0;
            for (int i = 0; i < count; i++) {
                Json comm = new Json(commentTJson.jsonPath($_type + ".data.oriCommList" + "[" + i + "]").get());
                commentContent = comm.jsonPath("content").get();// comment text
                userId = comm.jsonPath("userid").get();// commenter's user id
                likes = comm.jsonPath("up").get();
                reComms = comm.jsonPath("repnum").get();
                // Coral times are epoch seconds; appending "000" converts to millis.
                String time = comm.jsonPath("time").get() + "000";
                commentTimeL = Long.parseLong(time);// comment timestamp
                String idStr = comm.jsonPath("id").get();// comment id
                Long id = Long.parseLong(idStr);// oriCommList id, used as the repCommList key below
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerDataType.comment.enumVal(), id))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerDataType.article.enumVal(), articleKey))
                        .releaseTime(commentTimeL.longValue())
                        .addContentKV(Field_Author_Id, userId)
                        .addContentKV(Field_Content, commentContent)
                        .addContentKV(Field_I_Likes,likes)
                        .addContentKV(Field_I_Comments,reComms)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .resultLabelTag(comment)
                        .build();
                crawlerDatas.add(crawlerData);
                // Inline replies for this comment, if present under its id.
                String replayComm = commentTJson.jsonPath($_type + ".data.repCommList").get();
                if (commentT.indexOf("repCommList") != -1 && replayComm.indexOf(idStr) != -1) {
                    countRep = commentTJson.jsonPath($_type + ".data.repCommList." + id + "[*" + "]" + ".content").all().size();// reply loop count

                    for (int k = 0; k < countRep; k++) {
                        String reContent = commentTJson.jsonPath($_type + ".data.repCommList." + id + "[" + k + "]" + ".content").get();
                        Long reTime = Long.parseLong(commentTJson.jsonPath($_type + ".data.repCommList." + id + "[" + k + "]" + ".time").get() + "000");
                        String reUserid = commentTJson.jsonPath($_type + ".data.repCommList." + id + "[" + k + "]" + ".userid").get();// user id
                        String reId = commentTJson.jsonPath($_type+".data.repCommList."+id+"["+k+"]"+".id").get();// reply id
                        String likes2 = commentTJson.jsonPath($_type+".data.repCommList."+id+"["+k+"]"+".up").get();// like count of the reply's reply
                        String reComm2 = commentTJson.jsonPath($_type+".data.repCommList."+id+"["+k+"]"+".repnum").get();// reply count of the reply's reply
                        CrawlerData reCrawlerData = CrawlerData.builder()
                                .data(crawlerRequestRecord, httpPage)
                                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerDataType.comment.enumVal(), reId))
                                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerDataType.article.enumVal(), articleKey))
                                .releaseTime(reTime.longValue())
                                .addContentKV(Field_Author_Id, reUserid)
                                .addContentKV(Field_Content, reContent)
                                .addContentKV(Field_I_Likes,likes2)
                                .addContentKV(Field_I_Comments,reComm2)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                                .resultLabelTag(comment)
                                .build();
                        crawlerDatas.add(reCrawlerData);
                    }
                }
            }
        }
        return crawlerDatas;
    }

    /**
     * Washes a coral "commentnum" JSONP response into an interaction record
     * carrying the article's total comment count, parented to the article via
     * the "articleUrl" request extra.
     *
     * @param crawlerRequestRecord the commentnum request
     * @param httpPage             the downloaded JSONP response
     * @return the interaction data item
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String requestUrl = httpPage.getRequest().getUrl();
        // Coral id is the 5th path segment of the request URL.
        String coralId = requestUrl.split("/")[4];
        // Strip the JSONP callback wrapper to get the raw JSON payload.
        String rawBody = httpPage.getJson().get();
        String payload = rawBody.substring(rawBody.indexOf("(") + 1, rawBody.lastIndexOf(")"));
        String totalComments = new Json(payload).jsonPath($_type + ".data.commentnum").get();
        String articleKey = crawlerRequestRecord.getHttpRequest().getExtras().get("articleUrl").toString();
        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), coralId + System.currentTimeMillis()))
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerDataType.article.enumVal(), articleKey))
                .addContentKV(Field_I_Comments, totalComments)
                .resultLabelTag(interaction)
                .build();
    }

}
