package com.chance.cc.crawler.development.scripts.meipai;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField;
import com.chance.cc.crawler.development.scripts.tianya.TianyaCarSeriesCrawlerScript;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.time.LocalDate;
import java.util.*;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Field_Author_Follows;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/4/12 14:17
 * @Description 美拍视频
 **/
public class MeiPaiVideoCrawlerScript extends CrawlerCommonScript {

    private Logger log = LoggerFactory.getLogger(MeiPaiVideoCrawlerScript.class);// instance logger
    private static final String DOMAIN = "meipai";
    private static final String SITE = "video";
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";// biz tag carrying the retry count

    // Entry pages, e.g.:
    //    https://www.meipai.com/medias/hot
    //    https://www.meipai.com/square/460
    private static final String VideoRegulars = "https://www.meipai.com/medias/hot";// hot home page
    private static final String VideoTwoRegulars = "https://www.meipai.com/square/\\S*";// channel ("square") home page
    // Timeline (list) JSON endpoints, e.g.:
    //    https://www.meipai.com/home/hot_timeline?page=1
    //    https://www.meipai.com/squares/new_timeline?page=6&tid=16
    private static final String VideoNextRegulars = "https://www.meipai.com/home/hot_timeline\\?page=\\d*";// hot timeline page
    private static final String VideoTwoNextRegulars = "https://www.meipai.com/squares/new_timeline\\?page=\\d*&tid=\\S*";// channel timeline page

    //    https://www.meipai.com/media/6846980197415142888
    private static final String ContentRegulars = "https://www.meipai.com/media/\\S*";// video detail page

    //    https://www.meipai.com/medias/comments_timeline?page=2&id=6846980197415142888
    private static final String CommentRegulars = "https://www.meipai.com/medias/comments_timeline\\?page=\\d*&id=\\S*";// comment timeline page (first and next)

    /**
     * Domain identifier used by the framework to route records to this script.
     *
     * @return the constant domain name {@code "meipai"}
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers every URL pattern this script handles: entry pages, timeline
     * JSON pages, detail pages and comment pages.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {
                VideoRegulars, VideoTwoRegulars,
                VideoNextRegulars, VideoTwoNextRegulars,
                ContentRegulars, CommentRegulars
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Accepts a record only when its business "site" tag equals {@code "video"}.
     *
     * @param crawlerRecord the incoming request record
     * @return true when this script should process the record
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        return SITE.equals(
                crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site"));
    }

    /**
     * Routes a downloaded page to the matching link-extraction handler.
     * On a failed download (non-200 or not marked successful) the request is
     * re-queued unless the server answered 404.
     *
     * @param crawlerRecord the record that produced this page
     * @param page          the downloaded page
     * @return newly discovered requests (possibly a retry of this one)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();// was raw-typed ArrayList
        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // Check the download succeeded before trying to parse anything.
        if (page.getStatusCode() != 200 || (!page.isDownloadSuccess())) {
            // BUGFIX: the format string used a literal "{null}" instead of the "{}"
            // placeholder, so the failing URL was never interpolated into the log.
            log.error("download page url == {} error status is {}", requestUrl, page.getStatusCode());
            if (page.getStatusCode() != 404) {// retry unless the resource is definitively gone
                this.requestAgainCrawlerRecord(parsedLinks, crawlerRecord);
                crawlerRecord.setNeedWashPage(false);
                return parsedLinks;
            }
        }
        if (requestUrl.matches(VideoRegulars) || requestUrl.matches(VideoTwoRegulars)) {
            homeRequest(crawlerRecord, page, parsedLinks);// entry page -> first timeline request
        } else if (requestUrl.matches(VideoNextRegulars) || requestUrl.matches(VideoTwoNextRegulars)) {
            contentRequest(crawlerRecord, page, parsedLinks);// timeline -> detail/comment/next page
        } else if (requestUrl.matches(CommentRegulars)) {
            commentNextRequest(crawlerRecord, page, parsedLinks);// comment page -> next comment page
        }
        return parsedLinks;
    }

    // Converts a hot/channel landing page into its first JSON timeline request,
    // stashing the channel name into the business tags on the way.
    private void homeRequest(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parsedLinks) {
        String pageUrl = page.getRequest().getUrl();
        String timelineUrl = null;
        if (pageUrl.matches(VideoRegulars)) {
            timelineUrl = "https://www.meipai.com/home/hot_timeline?page=1";
        }
        if (pageUrl.matches(VideoTwoRegulars)) {
            // Last path segment of a /square/<tid> url is the channel id.
            String[] segments = pageUrl.split("/");
            timelineUrl = "https://www.meipai.com/squares/new_timeline?page=1&tid=" + segments[segments.length - 1];
        }
        if (StringUtils.isBlank(timelineUrl)) {
            return;
        }
        // Channel label on the page; the anchor's class attribute differs between layouts.
        String channel = page.getHtml().xpath("//a[@class=\"js-convert-emoji dbl current\"]/text()").get();
        if (channel == null) {
            channel = page.getHtml().xpath("//a[@class=\"dbl current\"]/text()").get();
        }
        crawlerRecord.tagsCreator().bizTags().addCustomKV(AICCommonField.Tag_Site_Info, channel);
        parsedLinks.add(CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .recordKey(timelineUrl)
                .httpUrl(timelineUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .needWashed(false)
                .build());
    }

    // Parses a timeline JSON page: schedules the next timeline page, then for every
    // media entry in the payload schedules one detail-page request and one
    // first-comment-page request (carrying the comment filter definition).
    private void contentRequest(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parsedLinks) {
        String url = page.getRequest().getUrl();
        if (url.matches(VideoNextRegulars)) {
            // Strip everything non-numeric; the first number left is the page index.
            String[] articleId = Pattern.compile("[^0-9]").matcher(url).replaceAll(" ").replaceAll("\\s+", " ").trim().split(" ");
            String nextPageUrl = "https://www.meipai.com/home/hot_timeline?page=" + (Integer.parseInt(articleId[0]) + 1);
            CrawlerRequestRecord contentRecord = CrawlerRequestRecord.builder()// next hot-timeline page
                    .turnPageRequest(crawlerRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .needWashed(false)
                    .build();
            parsedLinks.add(contentRecord);

        }

        if (url.matches(VideoTwoNextRegulars)) {
            // Turn "…page=<n>&tid=<t>" into path segments so page and tid become the last two entries.
            String[] split = url.replaceAll("page=", "/").replaceAll("&tid=", "/").split("/");
            String nextPageUrl = "https://www.meipai.com/squares/new_timeline?page=" + (Integer.parseInt(split[split.length - 2]) + 1) + "&tid=" + split[split.length - 1];
            CrawlerRequestRecord contentRecord = CrawlerRequestRecord.builder()// next channel-timeline page
                    .turnPageRequest(crawlerRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .needWashed(false)
                    .build();
            parsedLinks.add(contentRecord);
        }

//        Detail entries: walk every media object in the JSON payload.
        List<Selectable> nodes = page.getJson().jsonPath($_type + "..medias[*]").nodes();// all media entries
        for (Selectable node : nodes) {
            try {
                String contentUrlId = new Json(node.get()).jsonPath(".id").get();// media id -> detail url
                if (StringUtils.isBlank(contentUrlId)) {
                    continue;
                }
                String contentUrl = "https://www.meipai.com/media/" + contentUrlId;

                String pubTimeStr = new Json(node.get()).jsonPath(".created_at").get();// raw publish time
                long date = 0;
                try {
                    date = washTime(pubTimeStr);
                } catch (ParseException e) {
                    log.error(e.getMessage());
                }
                // A zero time means the entry could not be dated; abort this whole page.
                if (date == 0) {
                    log.error("date is null,url" + crawlerRecord.getHttpRequest().getUrl());
                    crawlerRecord.setNeedWashPage(false);
                    return;
                }

                // Stash publish time and detail url into biz tags; the wash methods read them back later.
                crawlerRecord.tagsCreator().bizTags().addCustomKV("pubTime",date);
                crawlerRecord.tagsCreator().bizTags().addCustomKV(Field_Urls,contentUrl);

                String commentUrl = "https://www.meipai.com/medias/comments_timeline?page=1&id=" + contentUrlId;

                CrawlerRequestRecord contentRecord = CrawlerRequestRecord.builder()// detail-page request
                        .itemPageRequest(crawlerRecord)
                        .recordKey(contentUrl)
                        .httpUrl(contentUrl)
                        .releaseTime(date)
                        .copyBizTags()
                        .copyResultTags()
                        .needWashed(true)
                        .build();
                parsedLinks.add(contentRecord);

                // The comment request reuses the filter definition serialized into the biz tags.
                KVTag filterInfoTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()// first comment page
                        .itemPageRequest(crawlerRecord)
                        .recordKey(commentUrl)
                        .httpUrl(commentUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .needWashed(true)
                        .build();
                commentRecord.setFilter(filterInfoRecord.getFilter());
                commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                parsedLinks.add(commentRecord);
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }
    }


    // Schedules the next comment-timeline page for the same media id.
    private void commentNextRequest(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parsedLinks) {
        try {
            // e.g. https://www.meipai.com/medias/comments_timeline?page=2&id=6846980197415142888
            String currentUrl = page.getRequest().getUrl();
            // Turn "…page=<n>&id=<m>" into path segments: page and id end up as the last two entries.
            String[] parts = currentUrl.replaceAll("page=", "/").replaceAll("&id=", "/").split("/");
            int nextPage = Integer.parseInt(parts[parts.length - 2]) + 1;
            String nextPageUrl = "https://www.meipai.com/medias/comments_timeline?page=" + nextPage + "&id=" + parts[parts.length - 1];

            // Carry the comment filter definition forward from the business tags.
            KVTag filterInfoTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
            CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
            CrawlerRequestRecord nextRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .needWashed(true)
                    .build();
            nextRecord.setFilter(filterInfoRecord.getFilter());
            nextRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
            parsedLinks.add(nextRecord);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
    }


    /**
     * Dispatches a page to the wash routine matching its URL and result data types.
     *
     * @param crawlerRecord the record whose result tags select the wash routines
     * @param page          the downloaded page
     * @return washed data records from every applicable routine
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        if (requestUrl.matches(ContentRegulars)) {
            if (crawlerRecord.tagsCreator().resultTags().hasDataType(article)) {// article body
                crawlerDataList.addAll(articleWash(crawlerRecord, page));
            }
            // NOTE(review): this guard repeats the article check above although its
            // original comment said "article link" — possibly a dedicated data type
            // was intended; confirm against CrawlerDataType before changing.
            if (crawlerRecord.tagsCreator().resultTags().hasDataType(article)) {// article link
                crawlerDataList.addAll(articleListWash(crawlerRecord, page));
            }
            if (crawlerRecord.tagsCreator().resultTags().hasDataType(interaction)) {// interaction counters
                crawlerDataList.addAll(articleInteractionWash(crawlerRecord, page));
            }
        }
        if (requestUrl.matches(CommentRegulars)) {
            if (crawlerRecord.tagsCreator().resultTags().hasDataType(comment)) {// comments
                crawlerDataList.addAll(commentWash(crawlerRecord, page));
            }
        }
        return crawlerDataList;
    }

    // Emits an "article link" record for the current detail page (routed to the redis pipeline).
    public List<CrawlerData> articleListWash(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        log.info("清洗文章链接");
        List<CrawlerData> results = new ArrayList<>();
        try {
            String articleUrl = page.getRequest().getUrl();// detail-page url
            String[] urlParts = articleUrl.split("/");
            String articleId = urlParts[urlParts.length - 1];// media id = last path segment
            // Publish time was stashed into biz tags while parsing the timeline JSON.
            long releaseTime = Long.parseLong(
                    crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("pubTime"));
            String channel = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(AICCommonField.Tag_Site_Info);
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, article.enumVal(), articleId))
                    .resultLabelTag(article)
                    .url(articleUrl)
                    .releaseTime(releaseTime)
                    .addContentKV("releaseTimeToLong", releaseTime + "")
                    .addContentKV("articleUrl", articleUrl)
                    .addContentKV(AICCommonField.Tag_Site_Info, channel)
                    .flowInPipelineTag("redis")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            results.add(crawlerData);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
        return results;
    }

    /**
     * Washes the interaction counters (likes, comments, forwards, views) of a
     * video detail page into a kafka-bound CrawlerData record.
     *
     * @param crawlerRecord record carrying the "pubTime" business tag
     * @param page          the downloaded detail page
     * @return a single-element list, or an empty list when washing fails
     */
    private List<CrawlerData> articleInteractionWash(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        log.info("文章互动量清洗");
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        String articleUrl = page.getRequest().getUrl();
        String like = page.getHtml().xpath("//span[@class=\"pr top-2\"]/text()").get();// like count
        String comments = page.getHtml().xpath("//span[@id=\"commentCount\"]/text()").get();// comment count
        String forwards = page.getHtml().xpath("//span[@class=\"pr top-3\"][2]/text()").get();// forward count
        String views = page.getHtml().xpath("//div[@class=\"detail-location\"]/text()[2]").get();// view count
        try {
            // BUGFIX: parse the release time inside the try block so a missing or
            // malformed "pubTime" tag is logged like every other wash failure instead
            // of escaping as an uncaught NumberFormatException (previous behavior,
            // and inconsistent with articleWash/articleListWash).
            String releaseTime = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("pubTime");
            long time = Long.parseLong(releaseTime);
            String[] split = articleUrl.split("/");
            String articleId = split[split.length - 1];// media id = last path segment
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, interaction.enumVal(), articleId))
                    .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, article.enumVal(), articleId))
                    .resultLabelTag(interaction)
                    .url(articleUrl)
                    .releaseTime(time)
                    .addContentKV(AICCommonField.Field_I_Comments, washNum(comments))
                    .addContentKV(AICCommonField.Field_I_Likes, washNum(like))
                    .addContentKV(AICCommonField.Field_I_Forwards, washNum(forwards))
                    .addContentKV(Field_I_Views, washNum(views))
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            crawlerArticleDataList.add(crawlerData);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
        return crawlerArticleDataList;
    }

    /**
     * Washes a video detail page into an article CrawlerData record (title,
     * author, description text and topic tags), routed to the kafka pipeline.
     *
     * @param crawlerRecord record carrying the "pubTime" business tag
     * @param page          the downloaded detail page
     * @return a single-element list, or an empty list when washing fails
     */
    public List<CrawlerData> articleWash(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        log.info("解析文章");
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        try {
            String articleUrl = page.getRequest().getUrl();// detail-page url
            String releaseTime = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("pubTime");
            long time = Long.parseLong(releaseTime);

            String title = page.getHtml().xpath("//h1/text()").get();// title
            String authorName = page.getHtml().xpath("//h3[@class=\"detail-name pa\"]/a/text()").get().trim();// author name
            String[] authorIdStr = page.getHtml().xpath("//div[@class=\"detail-r pr\"]/a/@href").get().split("/");// author id from profile href
            String authorId = authorIdStr[authorIdStr.length - 1];
            List<String> texts = page.getHtml().xpath("//h1[@class=\"detail-description break js-convert-emoji js-detail-desc\"]/text()").all();// description fragments
            List<String> topicTags = page.getHtml().xpath("//a[@class=\"f-a-topic\"]/text()").all();// topic tags
            String content = String.join(" ", texts);// joined description text
            // BUGFIX: the original built a tag buffer but then stored the raw
            // List#toString() ("[a, b]") in the record; store the space-joined tags.
            String tags = String.join(" ", topicTags);
            String[] split = articleUrl.split("/");// media id = last path segment
            String articleId = split[split.length - 1];
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, article.enumVal(), articleId))
                    .resultLabelTag(article)
                    .url(articleUrl)
                    .releaseTime(time)
                    .addContentKV(AICCommonField.Field_Content, content.trim())
                    .addContentKV(AICCommonField.Field_Title, title.trim())
                    .addContentKV(AICCommonField.Field_Author, authorName)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(AutoVMCommonField.Tag_Field_Topic_Type, tags.trim())
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            crawlerArticleDataList.add(crawlerData);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
        return crawlerArticleDataList;
    }


    // Washes a comment-timeline JSON page: one CrawlerData per comment entry.
    private Collection<? extends CrawlerData> commentWash(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        log.info("评论清洗");
        List<CrawlerData> commentDataList = new ArrayList<>();
        List<Selectable> nodes = page.getJson().jsonPath($_type + ".[*]").nodes();
        for (Selectable node : nodes) {
            try {
                Json entry = new Json(node.get());// parse each node once, read all fields from it
                String content = entry.jsonPath(".content_origin").get();// comment text
                String pubTimeStr = entry.jsonPath(".created_at").get();// comment time
                String user = entry.jsonPath(".screen_name").get();// commenter name
                String userId = entry.jsonPath(".uid").get();// commenter id
                String commentId = entry.jsonPath(".id").get();// comment id
                String articleId = entry.jsonPath(".media_id").get();// parent media id
                String articleUrl = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(Field_Urls);

                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRecord, page)
                        .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, comment.enumVal(), commentId))
                        .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), SITE, article.enumVal(), articleId))
                        .resultLabelTag(comment)
                        .url(articleUrl)
                        .releaseTime(washTime(pubTimeStr))
                        .addContentKV(AICCommonField.Field_Content, content)
                        .addContentKV(AICCommonField.Field_Author, user)
                        .addContentKV(AICCommonField.Field_Author_Id, userId)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .flowInPipelineTag("kafka")
                        .build();
                crawlerData.setFilterPipelineResult(true);
                commentDataList.add(crawlerData);
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
        }
        return commentDataList;
    }

    /**
     * No post-execution cleanup is needed for this script; intentionally a no-op.
     */
    @Override
    public void afterExecute(CrawlerRecordContext context) {
    }

    /**
     * Re-queues the current request after a failed download, up to 10 attempts.
     * The attempt count travels in the {@code REQUEST_AGAIN_TAG} business tag and
     * is appended to the record key so the retry is not deduplicated away.
     *
     * @param crawlerRequestRecords sink receiving the rebuilt retry record
     * @param crawlerRecord         the record whose download failed
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 10) {
                // BUGFIX: the message said "pcauto" (copy-pasted from another script);
                // use this script's domain so alerts point at the right crawler.
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        CrawlerRequestRecord crawlerRequestRecord = null;
        // Rebuild the retry with the same request flavor (turn-page vs item-page) as the original.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // Preserve the original request's wash/parse flags, headers and extras verbatim.
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Converts Meipai's relative/partial timestamp text into epoch milliseconds.
     * Handles "N分钟前" ("N minutes ago"), "今天 HH:mm" ("today at HH:mm"),
     * "MM-dd HH:mm" (current year implied) and "yy-MM-dd HH:mm" (current century implied).
     *
     * @param time raw timestamp text scraped from the site; may be null/blank
     * @return epoch millis, or 0 when the input is blank
     * @throws ParseException when the (possibly completed) text matches none of the patterns
     */
    private static long washTime(String time) throws ParseException {
        long timeToLong = 0;
        if (StringUtils.isBlank(time)) {
            return timeToLong;
        }

        long currentTimeMillis = System.currentTimeMillis();
        if (time.contains("分钟前")) {
            // "N分钟前": N minutes before now.
            String[] split = time.split("分钟");
            return currentTimeMillis - (DateUtils.MILLIS_PER_MINUTE * Integer.parseInt(split[0]));
        }
        if (time.contains("今天")) {
            // "今天 HH:mm": rebuild a full date from the current calendar day.
            String[] split = time.split("今天");
            String year = String.valueOf(Calendar.getInstance().get(Calendar.YEAR));
            String month = String.valueOf(Calendar.getInstance().get(Calendar.MONTH)+1);
            String date = String.valueOf(Calendar.getInstance().get(Calendar.DATE));
            String pubTime = year + "-" + month + "-" + date + " " + split[1];
            long times = DateUtils.parseDate(pubTime, "yyyy-MM-dd HH:mm").getTime();

            return times;
        }

        LocalDate now = LocalDate.now();
        if (time.matches("\\d{2}-\\d{2} \\d{2}:\\d{2}")) {
            // "MM-dd HH:mm": prepend the current year with no separator, producing
            // e.g. "202104-12 10:20" — deliberately matched by the "yyyyMM-dd HH:mm"
            // pattern in the final parse below.
            time = now.getYear() + time;
        }

        if (time.matches("\\d{2}-\\d{2}-\\d{2} \\d{2}:\\d{2}")) {
            // "yy-MM-dd HH:mm": prepend the current century ("20").
            String year = String.valueOf(now.getYear());
            time = year.substring(0, 2) + time;
        }

        timeToLong = DateUtils.parseDate(time, "yyyyMM-dd HH:mm", "yyyy-MM-dd HH:mm").getTime();
        return timeToLong;
    }

    /**
     * Normalizes a scraped counter string into a plain integer string.
     * Strips the "播放" ("plays") suffix, maps placeholder labels
     * ("分享"/"评论"/"喜欢" shown when the counter is zero) to "0", and expands
     * the "万" (×10,000) suffix.
     *
     * @param num raw counter text; may be null or blank
     * @return the count as a plain decimal string ("0" for blank/placeholder input)
     */
    private static String washNum(String num) {
        if (StringUtils.isBlank(num)) {
            return "0";
        }

        if (num.contains("播放")) {
            num = num.replace("播放", "");
        }
        num = num.trim();

        // Placeholder labels the site shows when a counter is zero.
        if ("分享".equals(num) || "评论".equals(num) || "喜欢".equals(num)) {
            return "0";
        }

        if (num.contains("万")) {
            // BUGFIX: values >= 1000万 used to render in scientific notation
            // ("1.0E7") via String.valueOf(double), so splitting on '.' yielded "1".
            // Casting to long keeps the plain decimal form for all magnitudes.
            String[] split = num.split("万");
            return String.valueOf((long) (Double.parseDouble(split[0]) * 10000));
        } else {
            return num;
        }
    }


}
