package com.chance.cc.crawler.development.scripts.xcar.aikahao;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedReader;
import java.io.FileReader;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.time.LocalDate;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.*;
import static org.apache.commons.lang3.time.DateUtils.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/3/5 15:16
 * @Description
 *      爱卡汽车 爱咖号
 **/
public class XcarAiKaHaoCrawlerScript extends CrawlerCommonScript {

    /** SLF4J logger; static and final so a single instance is shared per class. */
    private static final Logger log = LoggerFactory.getLogger(XcarAiKaHaoCrawlerScript.class);
    /** Crawler domain key reported by {@code domain()}. */
    private static final String DOMAIN = "xcar";
    /** Site key matched against the "site" business tag in {@code crawlerCheck}. */
    private static final String SITE = "aikahao";
    /** Business-tag name used to count retry attempts for failed downloads. */
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";
    /** Category-tag key holding the list of module ids to crawl ("1"=articles, "2"=videos). */
    private static final String CRAWLER_MODULE_LIST = "crawlerModule";
    /** Category-tag key holding the comment-filter template record. */
    private static final String COMMENT_FILTER_INFO = "comment_filter_info";
    /** Request-extra key pointing at a file of detail-page URLs to re-collect. */
    private static final String FILE_PATH = "filePath";

    // Request templates (…_SOURCE_URL, filled via String.format) and the
    // regexes used to route downloaded pages (…_URL).
    private static final String ENTRANCE_URL = "https://aikahao.xcar.com.cn/";
    private static final String MODULE_SOURCE_URL = "https://aikahao.xcar.com.cn/xmediaapi/article/get_index_list?st=%s&page=1&score=3&rows=15&temp_id=%s&last_id=&e_put_time=";
    private static final String MODULE_URL = "https://aikahao.xcar.com.cn/xmediaapi/article/get_index_list\\S*";
    private static final String ITEM_ARTICLE_SOURCE_URL = "https://aikahao.xcar.com.cn/item/%s.html";
    private static final String ITEM_VIDEO_SOURCE_URL = "https://aikahao.xcar.com.cn/video/%s.html";
    private static final String ITEM_ARTICLE_URL = "https://aikahao.xcar.com.cn/item/\\d+.html";
    private static final String ITEM_VIDEO_URL = "https://aikahao.xcar.com.cn/video/\\d+.html";
    private static final String SERIES_SOURCE_URL = "https://news.xcar.com.cn/api/pserids/aikahaopserids?pserids=%s&default_pserid=%s&_=%s";
    private static final String SERIES_URL = "https://news.xcar.com.cn/api/pserids/aikahaopserids\\S*";
    private static final String COMMENT_SOURCE_URL = "https://comment.xcar.com.cn/interface/index.php?iact=CommentLevel&cid=%s&action=getNewsComment&sort=time&ctype=14&page=1&limit=25";
    private static final String COMMENT_URL = "https://comment.xcar.com.cn/interface/index.php\\S*";

    /**
     * Crawler domain this script serves.
     *
     * @return the constant domain key "xcar"
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers every URL pattern that routes a request into this script:
     * the entrance page, the module-list API, article/video detail pages
     * and the comment API.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {ENTRANCE_URL, MODULE_URL, ITEM_ARTICLE_URL, ITEM_VIDEO_URL, COMMENT_URL};
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Gate check before the script runs: accept only records whose
     * "site" business category tag equals {@code "aikahao"}.
     *
     * @param crawlerRequestRecord incoming request record
     * @return true when the record belongs to this site
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CrawlerBusinessTags bizTags = crawlerRequestRecord.tagsCreator().bizTags();
        String site = bizTags.getCategoryTag().getKVTagStrVal("site");
        return SITE.equals(site);
    }

    /**
     * Builds the initial seed requests from the entrance record.
     * <p>
     * Two seeding sources are supported:
     * <ul>
     *   <li>the {@code crawlerModule} category tag — one module-list request
     *       per module id ("1" = latest articles, "2" = latest videos);</li>
     *   <li>an optional file (request extra {@code filePath}) containing one
     *       detail-page URL per line, for ad-hoc re-collection.</li>
     * </ul>
     *
     * @param requestRecord        the entrance request record
     * @param supportSourceRecords unused support records
     * @return seed records to schedule (empty when the URL is not the entrance)
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> records = new ArrayList<>();

        String requestUrl = requestRecord.getHttpRequest().getUrl();
        if (requestUrl.matches(ENTRANCE_URL)) {
            if ((requestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().get(CRAWLER_MODULE_LIST)) != null) {
                @SuppressWarnings("unchecked") // the tag value is stored as List<String> by the seeding side
                List<String> crawlerModule = (List<String>) requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(CRAWLER_MODULE_LIST).getVal();
                // Consume the tag so copied biz tags do not re-trigger seeding.
                requestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(CRAWLER_MODULE_LIST);
                for (String module : crawlerModule) {
                    String url = String.format(MODULE_SOURCE_URL, System.currentTimeMillis(), module);
                    CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                            .turnPageRequest(requestRecord)
                            .httpUrl(url)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .copyBizTags()
                            .copyResultTags()
                            .build();
                    // Human-readable module path used downstream as a biz tag.
                    List<String> modulePath = new ArrayList<>();
                    String moduleName = "";
                    if ("1".equals(module)) {
                        moduleName = "最新文章";
                    } else if ("2".equals(module)) {
                        moduleName = "最新视频";
                    }
                    modulePath.add(moduleName);
                    record.tagsCreator().bizTags().addCustomKV(Field_Path, modulePath);
                    records.add(record);
                }
            }

            // Collect individual articles/videos listed in a file, one URL per line.
            if (requestRecord.getHttpRequest().getExtras() != null) {
                String filePath = (String) requestRecord.getHttpRequest().getExtras().get(FILE_PATH);
                if (StringUtils.isNotBlank(filePath)) {
                    // try-with-resources: the original leaked the reader on every call
                    try (BufferedReader bufferedReader = new BufferedReader(new FileReader(filePath))) {
                        String url;
                        while ((url = bufferedReader.readLine()) != null) {
                            if (StringUtils.isBlank(url)) {
                                continue;
                            }
                            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                                    .itemPageRequest(requestRecord)
                                    .httpUrl(url)
                                    .releaseTime(System.currentTimeMillis())
                                    .notFilterRecord()
                                    .copyBizTags()
                                    .copyResultTags()
                                    .build();
                            records.add(itemRecord);
                        }
                    } catch (Exception e) {
                        // Keep the stack trace; e.getMessage() alone hides the cause.
                        log.error("read url file [" + filePath + "] failed", e);
                    }
                }
            }
        }

        return records;
    }

    /**
     * Dispatches a downloaded page to the matching link parser.
     * Failed downloads (or HTTP 503) are re-queued through the retry helper
     * and excluded from washing.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded page
     * @return newly discovered request records
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> links = new ArrayList<>();

        boolean failed = !httpPage.isDownloadSuccess() || httpPage.getStatusCode() == 503;
        if (failed) {
            log.error(DOMAIN + " page download error!will retry");
            requestAgainCrawlerRecord(links, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return links;
        }

        String url = httpPage.getRequest().getUrl();
        if (url.matches(MODULE_URL)) {
            moduleUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(ITEM_ARTICLE_URL) || url.matches(ITEM_VIDEO_URL)) {
            itemUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(COMMENT_URL)) {
            commentUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        return links;
    }

    /**
     * Parses a module-list API response: schedules the next page of the list
     * and one detail-page request per entry.
     * <p>
     * Pagination rebuilds the current URL, incrementing {@code page} and
     * refreshing {@code st} (now), {@code last_id} and {@code e_put_time}
     * from the last entry of the current page.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded JSON list page
     * @param parsedLinks          output list new records are appended to
     */
    private void moduleUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String msg = httpPage.getJson().jsonPath($_type + ".msg").get();
        // Null-safe success check: a missing msg is treated like a failed
        // response and retried (the original NPE'd on msg.contains).
        if (msg == null || !msg.contains("操作成功")) {
            log.error("msg[{}]!url : [{}]", msg, crawlerRequestRecord.getHttpRequest().getUrl());
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        List<String> dataList = httpPage.getJson().jsonPath($_type + ".data.list").all();
        // Empty list page: nothing to paginate from and nothing to parse.
        // The original threw IndexOutOfBoundsException on get(size - 1).
        if (dataList == null || dataList.isEmpty()) {
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        JSONObject lastJson = JSONObject.parseObject(dataList.get(dataList.size() - 1));
        String lastId = lastJson.getString("id");
        String putTime = lastJson.getString("put_time");

        // Turn the page: rebuild the query string parameter by parameter.
        String requestUrl = httpPage.getRequest().getUrl();
        String[] split = requestUrl.split("\\?");
        StringBuilder next = new StringBuilder(split[0]).append('?');
        for (NameValuePair nameValuePair : URLEncodedUtils.parse(split[1], Charset.defaultCharset())) {
            String name = nameValuePair.getName();
            String value = nameValuePair.getValue();
            if ("page".equals(name)) {
                next.append(name).append('=').append(Integer.parseInt(value) + 1).append('&');
            } else if ("st".equals(name)) {
                next.append(name).append('=').append(System.currentTimeMillis()).append('&');
            } else if ("last_id".equals(name)) {
                next.append(name).append('=').append(lastId).append('&');
            } else if ("e_put_time".equals(name)) {
                next.append(name).append('=').append(putTime).append('&');
            } else {
                next.append(name).append('=').append(value).append('&');
            }
        }
        next.setLength(next.length() - 1); // drop trailing '&'
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(next.toString())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parsedLinks.add(turnRecord);

        // Detail pages: temp_id "1" = article, "2" = video.
        for (String data : dataList) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String id = jsonObject.getString("id");
            String tempId = jsonObject.getString("temp_id");
            if (StringUtils.isBlank(id) || StringUtils.isBlank(tempId)) {
                log.error("id or tempId is null !");
                continue;
            }
            String itemUrl = "";
            if ("2".equals(tempId)) {
                itemUrl = String.format(ITEM_VIDEO_SOURCE_URL, id);
            } else if ("1".equals(tempId)) {
                itemUrl = String.format(ITEM_ARTICLE_SOURCE_URL, id);
            }
            if (StringUtils.isBlank(itemUrl)) {
                log.error("itemUrl is null!");
                continue;
            }

            String releaseTime = jsonObject.getString("put_time");
            if (StringUtils.isBlank(releaseTime)) {
                log.error("releaseTime can not is null!");
                continue;
            }

            // put_time appears to be epoch seconds; "000" is appended to get millis.
            long releaseTimeToLong = Long.parseLong(releaseTime + "000");
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .copyResultTags()
                    .copyBizTags()
                    .build();

            String siteBiz = itemRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site_biz");
            if (StringUtils.isBlank(siteBiz)) {
                // site_biz is copied from the record-level tags, so it is blank
                // for every entry alike — abort the whole list.
                log.error("siteBiz can not is null !");
                return;
            }
            if ("2".equals(tempId)) {
                itemRecord.tagsCreator().bizTags().addSiteBiz("hao_video-" + siteBiz);
            } else if ("1".equals(tempId)) {
                itemRecord.tagsCreator().bizTags().addSiteBiz("hao_article-" + siteBiz);
            }
            parsedLinks.add(itemRecord);
        }
    }

    /**
     * Parses an article/video detail page. It schedules: an internal download
     * for the comment count, an internal download for car-series info when the
     * page carries series ids, and — when the result tags ask for comments —
     * a washable request for the first comment page.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded detail page
     * @param parsedLinks          output list new records are appended to
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();

        // Content id sits between the last '/' and the '.html' suffix.
        String contentId = requestUrl.substring(requestUrl.lastIndexOf("/") + 1, requestUrl.lastIndexOf("."));
        String commentUrl = String.format(COMMENT_SOURCE_URL, contentId);
        CrawlerRequestRecord countRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .recordKey(commentUrl + "getCommentsCount")
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .needWashed(false)
                .needParsed(false)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        parsedLinks.add(countRecord);

        // Car-series lookup, only when both hidden inputs are present.
        String pserids = httpPage.getHtml().xpath("//input[@name=\"pserids\"]/@value").get();
        String pserid = httpPage.getHtml().xpath("//input[@name=\"pserid\"]/@value").get();
        if (StringUtils.isNotEmpty(pserids) && StringUtils.isNotBlank(pserid)) {
            String seriesUrl = String.format(SERIES_SOURCE_URL, pserids, pserid, System.currentTimeMillis());
            CrawlerRequestRecord seriesRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(seriesUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .needParsed(false)
                    .needWashed(false)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            parsedLinks.add(seriesRecord);
        }

        // First comment page, washable, carrying the article URL for id building.
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (resultTags.hasDataType(comment)) {
            resultTags.getCategoryTag().removeLabelTag("comment");
            CrawlerRequestRecord commentFilter = resultTags.getCategoryTag().getKVTagObjVal(COMMENT_FILTER_INFO, CrawlerRequestRecord.class);
            if (commentFilter == null) {
                log.error("comment filter info can not is null!");
                return;
            }
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .needWashed(true)
                    .resultLabelTag(comment)
                    .build();
            commentRecord.setFilter(commentFilter.getFilter());
            commentRecord.setFilterInfos(commentFilter.getFilterInfos());
            commentRecord.getHttpRequest().addExtra("articleUrl", requestUrl);
            parsedLinks.add(commentRecord);
        }

    }

    /**
     * Parses a comment API page and, while more comments remain
     * (pageNo * pageSize < total count), emits the next comment-page request.
     * Pages that fail the count probe or report "无评论" are skipped.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded JSON comment page
     * @param parsedLinks          output list new records are appended to
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Probe the count field first: an unparsable body means a bad download.
        try {
            httpPage.getJson().jsonPath($_type + ".config.count");
        } catch (Exception e) {
            log.error("comment page [{}] download is error!will retry", crawlerRequestRecord.getHttpRequest().getUrl());
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        String message = httpPage.getJson().jsonPath($_type + ".info.message").get();
        if (StringUtils.isNotBlank(message) && "无评论".equals(message)) {
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        // Rebuild the query string with page+1, remembering page size for the bound check.
        String requestUrl = httpPage.getRequest().getUrl();
        String[] parts = requestUrl.split("\\?");
        int pageNo = 0;
        int pageSize = 0;
        StringBuilder next = new StringBuilder(parts[0]).append('?');
        for (NameValuePair pair : URLEncodedUtils.parse(parts[1], Charset.defaultCharset())) {
            String name = pair.getName();
            String value = pair.getValue();
            if ("page".equals(name)) {
                pageNo = Integer.parseInt(value);
                next.append(name).append('=').append(pageNo + 1).append('&');
            } else {
                next.append(name).append('=').append(value).append('&');
            }
            if ("limit".equals(name)) {
                pageSize = Integer.parseInt(value);
            }
        }
        next.setLength(next.length() - 1); // drop trailing '&'
        String nextUrl = next.toString();

        int totalPage = Integer.parseInt(httpPage.getJson().jsonPath($_type + ".config.count").get());
        if (pageNo * pageSize < totalPage) {
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .needWashed(true)
                    .copyResultTags()
                    .build();
            commentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
            parsedLinks.add(commentRecord);
        }
    }

    /**
     * Merges internal download results back onto the originating record:
     * comment-count responses become the {@code comments} request extra,
     * series responses become the {@code Tag_Field_Series} business tag.
     *
     * @param crawlerRecord           record the internal downloads belong to
     * @param internalDownloadRecords completed internal download records
     * @param links                   unused
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            HttpPage internalDownloadPage = internalDownloadRecord.getInternalDownloadPage();
            String url = internalDownloadPage.getRequest().getUrl();
            if (url.matches(COMMENT_URL)) {
                String comments = "0";
                try {
                    comments = internalDownloadPage.getJson().jsonPath($_type + ".config.count").get();
                } catch (Exception e) {
                    // Missing/unparsable count: fall back to "0" rather than fail the record.
                    comments = "0";
                }
                crawlerRecord.getHttpRequest().addExtra("comments", comments);
            } else if (url.matches(SERIES_URL)) {
                try {
                    // pserids query value sits between the first '=' and the first '&'.
                    String substring = url.substring(url.indexOf("=") + 1, url.indexOf("&"));
                    List<String> psides = substring.contains(",") ? Arrays.asList(substring.split(",")) : Arrays.asList(substring);
                    String psidsInfo = internalDownloadPage.getJson().jsonPath($_type + ".api_data.detail.psids_info").get();
                    if (StringUtils.isBlank(psidsInfo)) {
                        continue;
                    }
                    List<Map<String, String>> seriesList = new ArrayList<>();
                    for (String pside : psides) {
                        // Each series is keyed as "k<pserid>" inside psids_info.
                        JSONObject jsonObject = JSONObject.parseObject(psidsInfo).getJSONObject("k" + pside);
                        if (jsonObject == null) {
                            continue;
                        }
                        String seriesUrl = jsonObject.getJSONObject("link").getJSONObject("pseridinfo").getString("link_url");
                        String seriesName = jsonObject.getJSONObject("show_newcar").getJSONObject(pside).getString("psname");
                        Map<String, String> series = new HashMap<>();
                        series.put("series_id", pside);
                        series.put("series_name", seriesName);
                        series.put("series_url", seriesUrl);
                        seriesList.add(series);
                    }
                    crawlerRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Series, seriesList);
                } catch (Exception e) {
                    // Keep the stack trace: e.getMessage() alone loses the failure context.
                    log.error("parse series info failed, url [" + url + "]", e);
                }
            }
        }
    }

    /**
     * Washes a downloaded page into crawler data items, one set per data type
     * requested by the record's result tags (article, interaction, comment).
     *
     * @param crawlerRecord record that produced the page
     * @param page          downloaded page
     * @return washed data items
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();
        List<CrawlerData> washed = new ArrayList<>();

        if (resultTags.hasDataType(article)) {
            washed.add(washArticle(crawlerRecord, page));
        }
        if (resultTags.hasDataType(interaction)) {
            washed.add(washInteraction(crawlerRecord, page));
        }
        if (resultTags.hasDataType(comment)) {
            washed.addAll(washComment(crawlerRecord, page));
        }
        return washed;
    }

    /**
     * Washes an article/video detail page into an {@code article} data item
     * (title, author, author id, body text, author followers, topic tags).
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded detail page
     * @return the washed article data item
     */
    private CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        String title = httpPage.getHtml().xpath("//span[@class=\"detail_title\"]/text()").get();
        String author = httpPage.getHtml().xpath("//span[@class=\"detail_txt_lf\"]/a/text()").get();
        String authorId = httpPage.getHtml().xpath("//input[@name=\"author_id\"]/@value").get();
        String releaseTime = httpPage.getHtml().xpath("//span[@class=\"browse_time\"]/text()").get();
        long releaseTimeToLong = 0;
        try {
            releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime();
        } catch (ParseException e) {
            // Keep the cause; a bare message hides which value failed.
            log.error("parse release time [" + releaseTime + "] failed", e);
        }
        // Article body paragraphs, or the video-description list on video pages.
        List<String> contents = httpPage.getHtml().xpath("//div[@class=\"detail_list_p clearfix\"]//p//text()|//dt[text()='视频简介：']/following-sibling::dd//text()").all();
        StringBuilder contentBf = new StringBuilder();
        for (String data : contents) {
            contentBf.append(data).append(" ");
        }
        String authorFollows = httpPage.getHtml().xpath("//span[@class=\"username_follow_txt\"]/text()").get();
        authorFollows = StringUtils.isNotBlank(authorFollows) ? authorFollows.split("粉丝")[0] : "0";
        List<String> topicType = httpPage.getHtml().xpath("//dt[text()='\n" +
                "                            视频标签：\n" +
                "                        ' or text()='\n" +
                "                            文章标签：\n" +
                "                        ']/following-sibling::dd/span/a/text()").all();
        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                .resultLabelTag(article)
                .url(itemUrl)
                .releaseTime(releaseTimeToLong)
                .addContentKV(Field_Title, title)
                // StringUtils.trim is null-safe: the xpath may find no author node,
                // and author.trim() would NPE in that case.
                .addContentKV(Field_Author, StringUtils.trim(author))
                .addContentKV(Field_Author_Id, authorId)
                .addContentKV(Field_Content, contentBf.toString())
                .addContentKV(Field_Author_Follows, washNum(authorFollows))
                .build();
        crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, topicType);
        return crawlerData;
    }

    /**
     * Washes the interaction metrics (view count, comment count) of a detail
     * page into an {@code interaction} data item linked to its article.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded detail page
     * @return the washed interaction data item
     */
    private CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        // The comment count was stashed on the request by afterInternalDownload.
        String comments = (String) crawlerRequestRecord.getHttpRequest().getExtras().get("comments");
        String views = httpPage.getHtml().xpath("//span[@class=\"browse_number\"]").get();
        views = StringUtils.isNotBlank(views) ? views.split("浏览")[0] : "0";
        String releaseTime = httpPage.getHtml().xpath("//span[@class=\"browse_time\"]/text()").get();
        long releaseTimeToLong = 0;
        try {
            releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), articleKey))
                .resultLabelTag(interaction)
                .url(itemUrl)
                .releaseTime(releaseTimeToLong)
                .addContentKV(Field_I_Views, washNum(views))
                .addContentKV(Field_I_Comments, comments)
                .build();
    }

    /**
     * Washes a comment API page: each top-level comment plus all of its
     * children are converted into comment/interaction data items.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded JSON comment page
     * @return washed comment and interaction data items
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> result = new ArrayList<>();

        List<String> allReplies = httpPage.getJson().jsonPath($_type + ".list").all();
        for (String raw : allReplies) {
            result.addAll(commentData(crawlerRequestRecord, httpPage, commentMap(raw)));

            JSONObject child = JSONObject.parseObject(raw).getJSONObject("child");
            if (child == null) {
                continue;
            }
            for (Object childData : child.getJSONArray("list")) {
                String childJson = ((JSONObject) childData).toJSONString();
                result.addAll(commentData(crawlerRequestRecord, httpPage, commentMap(childJson)));
            }
        }
        return result;
    }


    /**
     * Builds a comment data item plus its companion interaction item
     * (likes, reply count) from one flattened comment map.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded comment page (carries the
     *                             {@code articleUrl} extra for id building)
     * @param map                  flattened comment fields from {@code commentMap}
     * @return zero, one or two data items (empty when time parsing fails)
     */
    private List<CrawlerData> commentData(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, Map<String, String> map) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1, articleUrl.lastIndexOf("."));
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        try {
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.comment.enumVal(), map.get("commentId")))
                    .resultLabelTag(valueOf("comment"))
                    .releaseTime(washTime(map.get("releaseTime")))
                    .url(itemUrl)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .addContentKV(Field_Author, map.get("author"))
                    .addContentKV(Field_Author_Id, map.get("authorId"))
                    .addContentKV(Field_Content, map.get("content"))
                    .addContentKV(Field_Floor, map.get("floor"))
                    .build();

            crawlerDataList.add(crawlerData);

            CrawlerData crawlerDataInteraction = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.comment.enumVal(), map.get("commentId")))
                    // NOTE(review): this id joins the raw `interaction` enum (its
                    // toString) instead of interaction.enumVal() used elsewhere.
                    // Kept as-is because changing it would alter existing data ids
                    // downstream — confirm whether this was intended.
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.comment.enumVal(), interaction, map.get("commentId")))
                    .resultLabelTag(valueOf("interaction"))
                    .releaseTime(washTime(map.get("releaseTime")))
                    .url(itemUrl)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .addContentKV(Field_I_Likes, map.get("likes"))
                    .addContentKV(Field_I_Comments, map.get("comments"))
                    .build();
            crawlerDataList.add(crawlerDataInteraction);
        } catch (ParseException e) {
            // Keep the stack trace: e.getMessage() alone loses the failure context.
            log.error("wash comment [" + map.get("commentId") + "] failed", e);
        }
        return crawlerDataList;
    }

    /**
     * Flattens one raw comment JSON object into a plain string map:
     * id, author, author id, release time, content, likes, child-reply
     * count and floor number.
     *
     * @param data raw comment JSON text
     * @return flattened comment fields
     */
    private Map<String, String> commentMap(String data) {
        JSONObject json = JSONObject.parseObject(data);

        // Child replies: count of the nested "child.list" array, "0" when absent.
        String childCount = "0";
        JSONObject child = json.getJSONObject("child");
        if (child != null) {
            childCount = String.valueOf(child.getJSONArray("list").toJavaList(String.class).size());
        }

        Map<String, String> commentMap = new HashMap<>();
        commentMap.put("commentId", json.getString("id"));
        commentMap.put("author", json.getString("user_name"));
        commentMap.put("authorId", json.getString("user_id"));
        commentMap.put("releaseTime", json.getString("pub_time"));
        commentMap.put("content", json.getString("conts"));
        commentMap.put("likes", json.getString("ups"));
        commentMap.put("comments", childCount);
        commentMap.put("floor", json.getString("layout"));
        return commentMap;
    }
    /**
     * Post-execution hook; intentionally a no-op — this script has no
     * per-record cleanup to perform.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-queues a failed request, tracking the attempt number in the
     * {@code xcar_request_again} business tag and giving up after 100 tries.
     * Requests carrying the {@code turn_page_item_request} label are retried
     * as item-page requests; all others as turn-page requests.
     *
     * @param crawlerRequestRecords output list the retry record is appended to
     * @param crawlerRecord         the failed record
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 100) {
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {},crawler record[{}]", crawlerRecord.getHttpRequest().getUrl(), JSONObject.toJSONString(crawlerRecord));
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // Label presence marks item-page requests; absence means turn-page.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    // Attempt count in the key keeps retries distinct for dedup.
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }
        // (The original's null check here was dead code: both branches assign
        // a freshly built record.)

        // Carry over request context so the retry is an exact replay.
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Converts the site's Chinese relative/absolute time strings into epoch
     * milliseconds. Supported forms: "刚刚", "N秒前", "N分钟前", "N小时前",
     * "昨天/前天 HH点mm分", "MM月dd日" (assumes current year), "yyyy-MM-dd"
     * and "yyyy年MM月dd日". Unrecognized or blank input yields 0.
     *
     * @param time raw time text (may be blank)
     * @return epoch milliseconds, or 0 when blank/unmatched
     * @throws ParseException when an absolute date fails to parse
     */
    private static long washTime(String time) throws ParseException {
        long releaseTimeToLong = 0;
        if (StringUtils.isBlank(time)) {
            return releaseTimeToLong;
        }

        long currentTime = System.currentTimeMillis();
        LocalDate now = LocalDate.now();
        if ("刚刚".equals(time)) {
            releaseTimeToLong = currentTime;
        } else if (time.matches("\\d+秒前")) {
            String num = time.split("秒")[0];
            releaseTimeToLong = currentTime - Integer.parseInt(num) * MILLIS_PER_SECOND;
        } else if (time.matches("\\d+分钟前")) {
            String num = time.split("分钟")[0];
            releaseTimeToLong = currentTime - Integer.parseInt(num) * MILLIS_PER_MINUTE;
        } else if (time.matches("\\d+小时前")) {
            String num = time.split("小时")[0];
            releaseTimeToLong = currentTime - Integer.parseInt(num) * MILLIS_PER_HOUR;
        } else if (time.matches("昨天 \\d+点\\d+分")) {
            String releaseTime = time.replace("昨天", now.minusDays(1).toString());
            // HH (hour-of-day 0-23) instead of the original hh (1-12 am/pm):
            // with hh, "12点30分" parsed as 00:30 (midnight) instead of 12:30.
            releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH点mm分").getTime();
        } else if (time.matches("前天 \\d+点\\d+分")) {
            String releaseTime = time.replace("前天", now.minusDays(2).toString());
            releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH点mm分").getTime();
        } else if (time.matches("\\d{2}月\\d{2}日")) {
            // Month-day only: assume the current year.
            String releaseTime = now.getYear() + time;
            releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyyMM月dd日").getTime();
        } else if (time.matches("\\d{4}-\\d{2}-\\d{2}")) {
            releaseTimeToLong = DateUtils.parseDate(time, "yyyy-MM-dd").getTime();
        } else if (time.matches("\\d{4}年\\d{2}月\\d{2}日")) {
            releaseTimeToLong = DateUtils.parseDate(time, "yyyy年MM月dd日").getTime();
        }
        return releaseTimeToLong;
    }

    /**
     * Normalizes a count string: blank becomes "0" and a "万" (ten-thousand)
     * suffix is expanded, e.g. "3.2万" -> "32000".
     *
     * @param num raw count text (may be blank)
     * @return plain integer string
     */
    private static String washNum(String num) {
        if (StringUtils.isBlank(num)) {
            return "0";
        }
        if (num.contains("万")) {
            // Cast to long instead of splitting Double.toString on '.':
            // values >= 1e7 render in scientific notation ("1.0E10"), and the
            // old split on "." truncated them to "1".
            return String.valueOf((long) (Double.parseDouble(num.split("万")[0]) * 10000));
        }
        return num;
    }

    /**
     * Manual smoke test for {@link #washTime(String)}; not used by the
     * crawler framework.
     */
    public static void main(String[] args) {
        String s = "前天 07点50分";
        try {
            long l = washTime(s);
            System.out.println(l);
        } catch (ParseException e) {
            e.printStackTrace();
        }
    }

}
