package com.chance.cc.crawler.development.scripts.jiemian.car;

import com.alibaba.fastjson.JSON;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.List;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * Crawler script for the Jiemian (界面新闻) "car" channel
 * (https://www.jiemian.com/lists/51.html). It walks the channel list pages,
 * article detail pages, the interaction-counter API and the paginated
 * comment API, producing article / interaction / comment CrawlerData.
 */
public class JiemianCarCrawlerScript extends CrawlerCommonScript {

    private Logger log = LoggerFactory.getLogger(JiemianCarCrawlerScript.class);

    // Seed list page of the car channel.
    public static final String listUrlPrefix = "https://www.jiemian.com/lists/51.html";

    // Paged list API (JSONP-wrapped); \d* matches the page number.
    public static final String nextPage = "https://a.jiemian.com/index.php\\?m=lists&a=cLists&id=194&type=card&page=\\d*";

    // Article detail page URL pattern.
    public static final String specificPage = "https://www.jiemian.com/article/\\d*.html";

    // Comment API, 5 comments per page.
    public static final String commentUrl = "https://a.jiemian.com/index.php\\?m=comment&a=getlistCommentP&aid=\\d{7}&page=\\d*&comment_type=1&per_page=5";

    // Interaction (view/comment counter) API.
    public static final String viewsUrl = "https://a.jiemian.com/index.php\\?m=article&a=getArticleP&aid=\\d*";

    // Page number used when turning from the seed page (see parseArticleLinks).
    // NOTE(review): mutable instance field that is read but never updated, so it
    // is effectively a constant 2 — confirm whether it was meant to advance.
    int next = 2;

    // "site" category-tag value that routes records into this script.
    private static final String scriptSite = "car";
    private static final String DOMAIN = "jiemian";

    // Biz-tag key tracking download retries (see requestAgainCrawlerRecord).
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";

    /**
     * Identifies the crawler domain this script serves.
     *
     * @return the script's domain, kept in sync with the {@code DOMAIN}
     *         constant used for tags, data ids and log output
     */
    @Override
    public String domain() {
        // Use the constant instead of a duplicate literal so the domain cannot
        // drift from the tag/dataId prefixes built elsewhere in this class.
        return DOMAIN;
    }

    /**
     * Registers the URL regular expressions that route downloaded pages into
     * this script (seed list, paged list, article, comment and view-count
     * endpoints).
     */
    @Override
    public void initUrlRegulars() {
        String[] urlRegulars = {listUrlPrefix, nextPage, specificPage, commentUrl, viewsUrl};
        for (String urlRegular : urlRegulars) {
            addUrlRegular(urlRegular);
        }
    }


    /**
     * Input gate: only records whose "site" category tag matches this
     * script's site ({@code scriptSite}) are processed.
     *
     * @param crawlerRequestRecord incoming request record
     * @return {@code true} when the record belongs to this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        if (categoryTag == null) {
            return false;
        }
        // Compare from the constant side: getKVTagStrVal may return null when
        // the "site" tag is absent, and equalsIgnoreCase(null) is simply false
        // (the old code NPE'd here instead of rejecting the record).
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }


    /**
     * Extracts follow-up requests from a downloaded page, dispatching to the
     * parser that matches the request URL (seed list page, paged list API, or
     * comment API). Failed downloads are re-enqueued for retry.
     *
     * @param crawlerRequestRecord record that produced this page
     * @param httpPage             the downloaded page
     * @return follow-up requests to enqueue (possibly empty)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> links = new ArrayList<CrawlerRequestRecord>();

        // Download failed: schedule a retry and skip washing this page.
        if (httpPage.getStatusCode() != 200 || !httpPage.isDownloadSuccess()) {
            log.error("download page url == {} error status is {}", httpPage.getRequest().getUrl(), httpPage.getStatusCode());
            this.requestAgainCrawlerRecord(links, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return links;
        }

        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        if (requestUrl.matches(listUrlPrefix)) {
            // Seed page: extract every article card it contains.
            return parseArticleLinks(crawlerRequestRecord, httpPage, links);
        }
        if (requestUrl.matches(nextPage)) {
            // JSONP list API page.
            return parseListLinks(crawlerRequestRecord, httpPage, links);
        }
        if (requestUrl.matches(commentUrl)) {
            // Comment API page: may enqueue the next comment page.
            return parseCommentLinks(crawlerRequestRecord, httpPage, links);
        }

        return links;
    }



    /**
     * Handles one page of the comment API. When the reported page_count
     * exceeds the current page number, enqueues a request for the next
     * comment page, re-attaching the filter info stashed on the
     * "comment_record_filter_info" biz tag by the list parser.
     *
     * @param crawlerRequestRecord record that produced this page
     * @param httpPage             downloaded comment API response
     * @param parseLinksList       accumulator for follow-up requests
     * @return the accumulator, with at most one next-page request added
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinksList) {

        // Article id and current page number are recovered from the request URL.
        String url = httpPage.getRequest().getUrl();
        String articleKey = url.substring(url.lastIndexOf("&aid=") + 5, url.lastIndexOf("&page="));
        String page = url.substring(url.lastIndexOf("&page=") + 6, url.lastIndexOf("&comment_type"));

        String rawText = httpPage.getRawText();
        int markerIndex = rawText.lastIndexOf("\"page_count\":\"");
        if (markerIndex < 0) {
            // Response shape changed or the API returned an error payload;
            // the old unguarded substring produced garbage and then crashed.
            log.error("page_count not found in comment response, url {}", url);
            return parseLinksList;
        }
        // Marker is 14 chars; the count runs up to the quote before "}",
        // e.g. ..."page_count":"5"}  ->  5
        String afterMarker = rawText.substring(markerIndex + 14);
        String pageCount = afterMarker.substring(0, afterMarker.indexOf("}") - 1);

        try {
            if (Integer.parseInt(pageCount) > Integer.parseInt(page)) {
                KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);

                int nextPageNo = Integer.parseInt(page) + 1;
                String commentUrl = "https://a.jiemian.com/index.php?m=comment&a=getlistCommentP&aid=" + articleKey + "&page=" + nextPageNo + "&comment_type=1&per_page=5";
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .recordKey(commentUrl)
                        .notFilterRecord()
                        .releaseTime(System.currentTimeMillis())
                        .resultLabelTag(comment)
                        .copyBizTags()
                        .build();
                commentRecord.setNeedWashPage(true);
                commentRecord.setFilter(filterInfoRecord.getFilter());
                commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                parseLinksList.add(commentRecord);
            }
        } catch (NumberFormatException e) {
            // Malformed paging info should not abort the whole parse pass.
            log.error("unparsable comment paging, url {} pageCount {} page {}", url, pageCount, page, e);
        }
        return parseLinksList;
    }

    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinksList) {

        String rawText = httpPage.getRawText();
        String substring = rawText.substring(9);
        String text = substring.split("\",\"hideLo")[0];

        //得到的是乱码，把乱码转化回来
        String newText = StringEscapeUtils.unescapeJava(text);

        //获取所有的链接  一个一个进行解析
        List<Selectable> nodes = new Html(newText).xpath("//div[@class=\"news-view left card\"]").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./div/div/h3/a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }
            //这个时间是要过滤的那个时间 七天之内
            String pubTime = node.xpath("./div/div/p/span[2]").get();
            pubTime = getPubTime(pubTime);

            if (StringUtils.isBlank(pubTime)) {
                continue;
            }

            try {
                long time = DateUtils.parseDate(pubTime, "yyyy/MM/dd HH:mm").getTime();

                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .needParsed(false)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(time)
                        .resultLabelTag(article)
                        .copyBizTags()
                        .build();
                itemRecord.setNeedWashPage(true);
                parseLinksList.add(itemRecord);

                //互动量
                String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];
                String viewsUrl = "https://a.jiemian.com/index.php?m=article&a=getArticleP&aid=" + articleKey;
                CrawlerRequestRecord viewsRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .needParsed(false)
                        .httpUrl(viewsUrl)
                        .recordKey(viewsUrl)
                        .releaseTime(time)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .build();
                viewsRecord.setNeedWashPage(true);
                parseLinksList.add(viewsRecord);

                KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);

                //评论
                String commentUrl = "https://a.jiemian.com/index.php?m=comment&a=getlistCommentP&aid=" + articleKey + "&page=1&comment_type=1&per_page=5";
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .recordKey(commentUrl)
                        .notFilterRecord()
                        .releaseTime(time)
                        .resultLabelTag(comment)
                        .copyBizTags()
                        .build();
                commentRecord.setNeedWashPage(true);
                commentRecord.setFilter(filterInfoRecord.getFilter());
                commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                parseLinksList.add(commentRecord);

            } catch (Exception e) {
                e.printStackTrace();
            }

        }
        //下一页的链接
        String url = httpPage.getRequest().getUrl();
        String substring1 = url.substring(url.lastIndexOf("=") + 1);
        int next = Integer.parseInt(substring1) + 1;

        String nextPageUrl = "https://a.jiemian.com/index.php?m=lists&a=cLists&id=194&type=card&page=" + next;
        CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .recordKey(nextPageUrl)
                .httpUrl(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .build();

        parseLinksList.add(turnPageRequest);


        return parseLinksList;
    }

    /**
     * Parses the seed list page (plain HTML). For every article card it
     * enqueues the article detail page, the interaction-counter API request
     * and the first comment page, then enqueues list page {@code next} to
     * start paging through the JSONP list API.
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinksList) {
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"news-view left card\"]").nodes();

        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./div/div/h3/a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }

            // Check for blank BEFORE normalizing: getPubTime dereferences its
            // argument, so the old order NPE'd on cards without a time span.
            String rawPubTime = node.xpath("./div/div/p/span[2]").get();
            if (StringUtils.isBlank(rawPubTime)) {
                continue;
            }
            String pubTime = getPubTime(rawPubTime);

            try {
                long time = DateUtils.parseDate(pubTime, "yyyy/MM/dd HH:mm").getTime();

                // Article detail page.
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .needParsed(false)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(time)
                        .resultLabelTag(article)
                        .copyBizTags()
                        .build();
                itemRecord.setNeedWashPage(true);
                parseLinksList.add(itemRecord);

                // Interaction counters for this article.
                String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];
                String viewsUrl = "https://a.jiemian.com/index.php?m=article&a=getArticleP&aid=" + articleKey;
                CrawlerRequestRecord viewsRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .needParsed(false)
                        .httpUrl(viewsUrl)
                        .recordKey(viewsUrl)
                        .releaseTime(time)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .build();
                viewsRecord.setNeedWashPage(true);
                parseLinksList.add(viewsRecord);

                KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);

                // First comment page for this article.
                // NOTE(review): releaseTime here is "now", while parseListLinks
                // uses the article time — confirm which is intended.
                String commentUrl = "https://a.jiemian.com/index.php?m=comment&a=getlistCommentP&aid=" + articleKey + "&page=1&comment_type=1&per_page=5";
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .recordKey(commentUrl)
                        .copyBizTags()
                        .notFilterRecord()
                        .releaseTime(System.currentTimeMillis())
                        .resultLabelTag(comment)
                        .build();
                commentRecord.setNeedWashPage(true);
                commentRecord.setFilter(filterInfoRecord.getFilter());
                commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());

                parseLinksList.add(commentRecord);

            } catch (Exception e) {
                // Keep best-effort per-card behavior, but log through slf4j
                // instead of e.printStackTrace().
                log.error("parse seed card failed, itemUrl {}", itemUrl, e);
            }
        }

        // Hand over to the JSONP list API, starting at page `next` (2).
        String nextPageUrl = "https://a.jiemian.com/index.php?m=lists&a=cLists&id=194&type=card&page=" + next;

        CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .recordKey(nextPageUrl)
                .httpUrl(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .build();

        parseLinksList.add(turnPageRequest);

        return parseLinksList;
    }


    /**
     * Normalizes the list page's relative publish-time labels to the
     * "yyyy/MM/dd HH:mm" form expected downstream. Handles "N分钟前"
     * (N minutes ago), "今天HH:mm" (today) and "昨天HH:mm" (yesterday);
     * anything else is assumed to be "MM/dd HH:mm" of the current year.
     *
     * @param pubTime raw label scraped from the list card (non-null)
     * @return an absolute "yyyy/MM/dd HH:mm"-style timestamp string
     */
    private String getPubTime(String pubTime) {
        SimpleDateFormat dayFormat = new SimpleDateFormat("yyyy/MM/dd");

        if (pubTime.contains("分钟前")) {
            // "N分钟前": subtract N minutes from now.
            int minutes = Integer.parseInt(pubTime.split("分")[0]);
            SimpleDateFormat format = new SimpleDateFormat("yyyy/MM/dd HH:mm");
            pubTime = format.format(new Date(System.currentTimeMillis() - minutes * 60L * 1000));
        } else if (pubTime.contains("今天")) {
            // "今天HH:mm": prepend today's date.
            String clock = pubTime.split("今天")[1];
            pubTime = dayFormat.format(new Date()) + " " + clock;
        } else if (pubTime.contains("昨天")) {
            // "昨天HH:mm": roll back one full day so month/year boundaries are
            // handled (the old "day - 1" arithmetic produced day 0 on the 1st).
            String clock = pubTime.split("昨天")[1];
            Calendar date = Calendar.getInstance();
            date.add(Calendar.DATE, -1);
            pubTime = dayFormat.format(date.getTime()) + " " + clock;
        } else {
            // "MM/dd HH:mm" style: prepend the current year.
            pubTime = Calendar.getInstance().get(Calendar.YEAR) + "/" + pubTime;
        }
        return pubTime;
    }


    /**
     * Output side: routes a downloaded page to the wash routine matching its
     * result data type and returns the produced {@link CrawlerData} items.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> results = new ArrayList<CrawlerData>();

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            results.add(washArticle(crawlerRequestRecord, httpPage));
        }

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            results.add(washInteraction(crawlerRequestRecord, httpPage));
        }

        // Comment pages yield their own list; note this returns it directly,
        // bypassing anything accumulated above.
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)) {
            return washComment(crawlerRequestRecord, httpPage);
        }

        return results;
    }

    /**
     * Washes an article detail page into a single {@link CrawlerData}:
     * lead paragraph + body text, image URLs, title, author and author id.
     *
     * @return the washed data, or {@code null} when the publish time is
     *         missing or unparsable
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {

            // e.g. https://www.jiemian.com/article/6623693.html -> 6623693
            String itemUrl = httpPage.getRequest().getUrl();
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];

            // Lead paragraph under the header (may be absent).
            String text = httpPage.getHtml().xpath("//div[@class=\"article-header\"]//p//text()").get();

            // Body paragraphs.
            List<String> allText = httpPage.getHtml().xpath("//div[@class=\"article-content\"]//p//text()").all();

            // Publish time string, parsed below as "yyyy/MM/dd HH:mm".
            String releaseTimeStr = httpPage.getHtml().xpath("//div[@class=\"article-info\"]/p/span[2]").get();
            if (StringUtils.isBlank(releaseTimeStr)) {
                // The old code passed null to parseDate, throwing unlogged.
                log.error("article release time missing, url {}", itemUrl);
                return null;
            }

            StringBuilder contents = new StringBuilder();
            for (String s : allText) {
                contents.append(s);
            }
            // defaultString: a missing lead paragraph used to be concatenated
            // as the literal string "null".
            String newText = StringUtils.defaultString(text) + contents;

            // Image URLs, normalized to absolute https with the site's size suffix.
            List<String> pictureList = httpPage.getHtml().xpath("//div[@class=\"article-main\"]//img/@src").all();
            StringBuilder pictures = new StringBuilder();
            for (String picture : pictureList) {
                if (picture.contains("https:")) {
                    pictures.append(picture).append("/0X1");
                } else {
                    pictures.append("https:").append(picture).append("/0X1");
                }
            }

            // trimToEmpty: a missing title/author used to NPE on .trim().
            String title = StringUtils.trimToEmpty(httpPage.getHtml().xpath("//div[@class=\"article-header\"]/h1").get());
            String author = StringUtils.trimToEmpty(httpPage.getHtml().xpath("//span[@class=\"author\"]/a").get());

            // Author id is the trailing query value of the author link, if any.
            String authorId = "";
            String id = httpPage.getHtml().xpath("//span[@class=\"author\"]/a/@href").get();
            if (id != null) {
                authorId = id.substring(id.lastIndexOf("=") + 1);
            }

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTimeStr, "yyyy/MM/dd HH:mm").getTime())
                    .addContentKV(AICCommonField.Field_Content, newText.trim())
                    .addContentKV(AICCommonField.Field_Title, title)
                    .addContentKV(AICCommonField.Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId.trim())
                    .addContentKV(AICCommonField.Field_Images, pictures.toString().trim())
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage(), e);
        }

        return null;
    }


    /**
     * Washes the interaction JSONP response ("getArticleP") into a
     * {@link CrawlerData} carrying the article's view and comment counters.
     */
    private CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {

        String itemUrl = httpPage.getRequest().getUrl();
        // URL ends with ...&aid=<articleKey>.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("=") + 1).split("\\.")[0];

        String views = httpPage.getJson().removePadding("jsonpReturn").jsonPath($_type + ".tongjiarr.hit").get();
        // Plain numeric counts pass through (the old code left them null).
        // A "w" suffix means x10,000: "1.5w" -> 15000, "1.25w" -> 12500
        // (the old digits-only parse mis-scaled multi-decimal values).
        String viewsCount = views;
        if (views != null && views.contains("w")) {
            String count = views.substring(0, views.lastIndexOf("w"));
            viewsCount = String.valueOf(Math.round(Double.parseDouble(count) * 10000));
        }

        String commentCount = httpPage.getJson().removePadding("jsonpReturn").jsonPath($_type + ".tongjiarr.count").get();

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                .addContentKV(AICCommonField.Field_I_Views, viewsCount)
                .addContentKV(AICCommonField.Field_I_Comments, commentCount)
                .releaseTime(System.currentTimeMillis())
                .build();
    }

    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {

        //获取文章id
        String substring = crawlerRequestRecord.getHttpRequest().getUrl().substring(64);
        String articleKey = substring.split("&page")[0];


        List<CrawlerData> crawlerDataList = new ArrayList<>();

        String rawText = httpPage.getRawText();
        String text = rawText.substring(59);
        String newText = text.split("\",\"count\"")[0];
        String context = StringEscapeUtils.unescapeJava(newText);
        List<Selectable> nodes = new Html(context).xpath("//dd[@class=\"comment-post\"]").nodes();


        for (Selectable node : nodes) {

            String commentId = node.xpath("./@id").get();
            String author = node.xpath(".//div[@class=\"comment-body\"]/a/text()").get();
            String content = node.xpath(".//div[@class=\"comment-body\"]/div[@class=\"comment-main\"]/p/text()").get();
            String releaseTime = node.xpath(".//div[@class=\"comment-footer\"]/span[1]").get();


            //获取回复的点赞
            String likeCount = node.xpath(".//span[@class=\"like\"]/em").get();
            //获取回复的回复数
            String commentCount = node.xpath(".//span[@class=\"comment\"]/em").get();
            //获取回复的回复
            if (!"(0)".equals(commentCount)) {
                List<Selectable> replyNodes = node.xpath(".//div[@class=\"report-view\"]").nodes();
                for (Selectable replyNode : replyNodes) {
                    String s1 = replyNode.xpath("./div/@id").get();
                    String parentId = s1.substring(s1.lastIndexOf("ew") + 2);
                    parentId = "comment_" + parentId;

                    String s2 = replyNode.xpath(".//li/@id").get();
                    String replyId = s2.substring(s2.lastIndexOf("t_" + 2));

                    String replyName = replyNode.xpath(".//div[@class=\"comment-body\"]/a").get();
                    String replyText = replyNode.xpath(".//div[@class=\"comment-main\"]/p/text()").get();
                    String date = node.xpath(".//span[@class=\"date\"]/text()").get();

                    String time = getTime(date);

                    CrawlerData crawlerData = null;
                    try {
                        crawlerData = CrawlerData.builder()
                                .data(crawlerRequestRecord, httpPage)
                                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), replyId))
                                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), parentId))
                                .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                                .releaseTime(DateUtils.parseDate(time, "yyyy/MM/dd HH:mm").getTime())
                                .addContentKV(Field_Author, replyName)
                                .addContentKV(Field_Author_Id, replyId)
                                .addContentKV(Field_Content, replyText)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                                .build();

                    } catch (ParseException e) {
                        e.printStackTrace();
                    }

                    crawlerDataList.add(crawlerData);
                }

            }

            releaseTime = getTime(releaseTime);

            CrawlerData crawlerData = null;
            try {
                crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy/MM/dd HH:mm").getTime())
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Author_Id, commentId)
                        .addContentKV(Field_Content, content.trim())
                        .addContentKV(Field_I_Likes, likeCount)
                        .addContentKV(Field_I_Comments, commentCount)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .build();

            } catch (ParseException e) {
                e.printStackTrace();
            }

            crawlerDataList.add(crawlerData);
        }

        return crawlerDataList;
    }


    /**
     * Converts the comment widget's relative timestamps ("N天前" = days ago,
     * "N小时前" = hours ago, "N分钟前" = minutes ago) into absolute
     * "yyyy/MM/dd HH:mm" strings; anything else passes through unchanged.
     *
     * @param releaseTime raw label scraped from a comment node (non-null)
     * @return an absolute timestamp string, or the input when no marker matched
     */
    private String getTime(String releaseTime) {
        SimpleDateFormat format = new SimpleDateFormat("yyyy/MM/dd HH:mm");
        // Long arithmetic throughout: the old int maths overflowed for >= 25
        // days (25 * 86_400_000 > Integer.MAX_VALUE).
        if (releaseTime.contains("天前")) {
            String time = releaseTime.substring(0, releaseTime.lastIndexOf("天"));
            long l = System.currentTimeMillis() - Long.parseLong(time) * 24L * 60 * 60 * 1000;
            releaseTime = format.format(new Date(l));
        }
        if (releaseTime.contains("小时前")) {
            String time = releaseTime.substring(0, releaseTime.lastIndexOf("小时"));
            long l = System.currentTimeMillis() - Long.parseLong(time) * 60L * 60 * 1000;
            releaseTime = format.format(new Date(l));
        }
        if (releaseTime.contains("分钟前")) {
            String time = releaseTime.substring(0, releaseTime.lastIndexOf("分钟"));
            long l = System.currentTimeMillis() - Long.parseLong(time) * 60L * 1000;
            releaseTime = format.format(new Date(l));
        }
        return releaseTime;
    }


    /**
     * Post-execute hook from the crawler framework; intentionally a no-op
     * for this script.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }
    /**
     * Re-enqueues a failed download, tracking the attempt count in the
     * {@code REQUEST_AGAIN_TAG} biz tag and giving up after 10 retries.
     * The retried record keeps the original headers, extras, tags and
     * wash/parse flags; the attempt count is appended to the record key so
     * the retry is not deduplicated away.
     *
     * @param crawlerRequestRecords accumulator the retry record is added to
     * @param crawlerRecord         the record whose download failed
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 10) {
                // Message previously said "pcauto" — a copy/paste from another
                // script; log this script's domain instead.
                log.error("{} download page the number of retries exceeds the limit" +
                        ",request url {}", DOMAIN, crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        CrawlerRequestRecord crawlerRequestRecord = null;
        // Turn-page and item requests are rebuilt through different builder
        // entry points; this tag tells them apart.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // Carry over the original request state, then bump the retry counter.
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

}
