package com.chance.cc.crawler.development.scripts.acfun;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.pipeline.result.CrawlerDataBuilder;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Field_Author_Follows;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Topic_Type;

/**
 * Acfun keyword-search crawler script: expands configured keywords into search
 * pages, follows video detail pages, and harvests article, interaction,
 * comment and sub-comment data.
 *
 * @author Zhao.Hhuan
 * @since 2021/3/16 13:58
 **/
public class AcfunCrawlerScript extends CrawlerCommonScript {

    private static Logger log = LoggerFactory.getLogger(AcfunCrawlerScript.class);
    private static final String DOMAIN = "acfun";
    private static final String SITE = "searchKw";
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";

    private static final String PRIFIX = "https://www.acfun.cn";
    private static final String ENTRANCE_URL = "https://www.acfun.cn/search";
    private static final String SEARCH_URL = "https://www.acfun.cn/search\\?sortType=\\d+&channelId=\\d+&type=[a-z]+&keyword=[\\S\\ ]*";
    private static final String ITEM_URL = "https://www.acfun.cn/v/ac\\d+";
    private static final String COMMENT_SOURCE_URL = "https://www.acfun.cn/rest/pc-direct/comment/list?sourceId=%s&sourceType=3&page=1&pivotCommentId=0&newPivotCommentId=";
    private static final String COMMENT_URL = "https://www.acfun.cn/rest/pc-direct/comment/list\\S*";
    private static final String SUB_COMMENT_SOURCE_URL = "https://www.acfun.cn/rest/pc-direct/comment/sublist?sourceId=%s&sourceType=3&rootCommentId=%s&page=1";
    private static final String SUB_COMMENT_URL = "https://www.acfun.cn/rest/pc-direct/comment/sublist\\S*";

    /**
     * 脚本domain定义
     *
     * @return
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * 进入脚本的正则列表
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(ENTRANCE_URL);
        addUrlRegular(SEARCH_URL);
        addUrlRegular(ITEM_URL);
        addUrlRegular(COMMENT_URL);
        addUrlRegular(SUB_COMMENT_URL);
    }

    /**
     * 是否执行脚本 ： 输入数据检查，合格的才进入脚本
     *
     * @param crawlerRequestRecord
     * @return
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String siteTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        return siteTag.equals(SITE);
    }

    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> crawlerRecords = new ArrayList<>();
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (url.contains("keys")) {
                initKeyword(requestRecord, supportSourceRecord, crawlerRecords);
            }
        }
        return crawlerRecords;
    }

    /**
     * 解析链接方法
     *
     * @param crawlerRequestRecord
     * @param httpPage
     * @return
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<CrawlerRequestRecord>();

        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() == 503) {
            log.error(DOMAIN + " page download error!will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        if(requestUrl.matches(SEARCH_URL)){
            searchUrlRecord(crawlerRequestRecord,httpPage,parsedLinks);
        }

        if(requestUrl.matches(ITEM_URL)){
            itemUrlRecord(crawlerRequestRecord,httpPage,parsedLinks);
        }

        if(requestUrl.matches(COMMENT_URL) || requestUrl.matches(SUB_COMMENT_URL)){
            commentUrlRecord(crawlerRequestRecord,httpPage,parsedLinks);
        }

        return parsedLinks;
    }

    private void searchUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String keyword = (String) httpPage.getRequest().getExtras().get("keyword");
        String requestUrl = httpPage.getRequest().getUrl();
        String rawText = httpPage.getRawText();
        String[] htmls = rawText.split("<script type=\"text/javascript\">");
        for (String html : htmls) {
            if(!html.startsWith("bigPipe.onPageletArrive")){
                continue;
            }

            String[] split = html.split("</script>");
            try{
                Json json = new Json(split[0]).removePadding("bigPipe.onPageletArrive");
                String type =json .jsonPath($_type + ".id").get();
                if("pagelet_video".equals(type)){
                    Html htmlContent = new Html(json.jsonPath($_type + ".html").get());
                    httpPage.setHtml(htmlContent);
                    String totalNum = httpPage.getHtml().xpath("//span[@class=\"total-num\"]/@data-total").get();
                    List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"search-video\"]").nodes();
                    if(nodes == null ||(!requestUrl.contains("pCursor") && StringUtils.isNotBlank(totalNum) && "0".equals(totalNum) && nodes.size() < 1)){
                        log.error(keyword + " search result is 0!");
                        return;
                    }
                    //翻页
                    String requestId = httpPage.getHtml().xpath("//div[@id=\"video-list__pager\"]/@data-requestid").get();
                    if(StringUtils.isNotBlank(requestId)){
                        String nextUrl = "";
                        if(requestUrl.contains("pCursor")){
                            String[] split1 = requestUrl.split("pCursor=");
                            nextUrl = split1[0] + "pCursor=" + (Integer.parseInt(split1[1]) + 1);
                        }else{
                            nextUrl = requestUrl + "&requestId="+requestId+"&pCursor=2";
                        }
                        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                                .turnPageRequest(crawlerRequestRecord)
                                .httpUrl(nextUrl)
                                .releaseTime(System.currentTimeMillis())
                                .copyBizTags()
                                .copyResultTags()
                                .build();
                        turnRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
                        parsedLinks.add(turnRecord);
                    }


                    //列表页解析
                    for (Selectable node : nodes) {
                        String itemUrl = node.xpath(".//div[@class=\"video__main__title\"]/a/@href").get();
                        if(StringUtils.isBlank(itemUrl)){
                            continue;
                        }else {
                            itemUrl = PRIFIX + itemUrl;
                        }

                        String releaseTime = node.xpath(".//span[@class=\"info__create-time\"]/text()").get();
                        if(StringUtils.isBlank(releaseTime)){
                            continue;
                        }
                        long releaseTimeToLong = DateUtils.parseDate(releaseTime,"yyyy-MM-dd").getTime();
                        CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                                .itemPageRequest(crawlerRequestRecord)
                                .httpUrl(itemUrl)
                                .recordKey(itemUrl + keyword)
                                .releaseTime(releaseTimeToLong)
                                .copyBizTags()
                                .copyResultTags()
                                .build();
                        parsedLinks.add(itemRecord);
                    }
                }
            }catch (Exception e){
                log.error(e.getMessage());
            }
        }
    }

    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (resultTags.hasDataType(comment)) {
            resultTags.getCategoryTag().removeLabelTag("comment");

            CrawlerRequestRecord filterRecord = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class);
            if (filterRecord == null) {
                log.error("filter record can not null !");
                return;
            }

            String comments = httpPage.getHtml().xpath("//div[@class=\"comment\"]/span/text()").get();
            //评论数要大于0
            if (StringUtils.isNotBlank(comments) && Integer.parseInt(comments.trim()) > 0) {
                String id = requestUrl.substring(requestUrl.lastIndexOf("ac") + 2);
                String url = String.format(COMMENT_SOURCE_URL, id);
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .needWashed(true)
                        .resultLabelTag(comment)
                        .build();
                commentRecord.setFilter(filterRecord.getFilter());
                commentRecord.setFilterInfos(filterRecord.getFilterInfos());
                commentRecord.getHttpRequest().addExtra("articleUrl", requestUrl);
                parsedLinks.add(commentRecord);
            }
        }
    }

    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        try{
            httpPage.getJson().jsonPath($_type + ".totalPage");
        }catch (Exception e){
            log.error("comment url [{}] download is error!will retry!",crawlerRequestRecord.getHttpRequest().getUrl());
            requestAgainCrawlerRecord(parsedLinks,crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        //评论翻页
        String requestUrl = httpPage.getRequest().getUrl();
        String[] split = requestUrl.split("\\?");
        String nextUrl = split[0] + "?";
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        String sourceId = "";
        for (NameValuePair nameValuePair : parse) {
            String name = nameValuePair.getName();
            String value = nameValuePair.getValue();
            if ("page".equals(name)) {
                nextUrl = nextUrl + name + "=" + (Integer.parseInt(value) + 1) + "&";
            } else {
                nextUrl = nextUrl + name + "=" + value + "&";
            }
            if("sourceId".equals(name)){
                sourceId = value;
            }
        }
        nextUrl = nextUrl.substring(0, nextUrl.length() - 1);
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();
        commentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(commentRecord);

        if(requestUrl.matches(SUB_COMMENT_URL)){
            return;
        }
        //评论的回复
        List<String> all1 = httpPage.getJson().jsonPath($_type + ".rootComments").all();
        for (String data : all1) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            int subCommentCountFormat = jsonObject.getIntValue("subCommentCountFormat");
            if(subCommentCountFormat < 1){
                continue;
            }

            String commentId = jsonObject.getString("commentId");
            String floor = jsonObject.getString("floor");//获取数据的时候有问题
            long releaseTimeToLong = jsonObject.getLongValue("timestamp");
            String url = String.format(SUB_COMMENT_SOURCE_URL,sourceId,commentId);
            CrawlerRequestRecord subCommentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(url)
                    .releaseTime(releaseTimeToLong)
                    .copyBizTags()
                    .needWashed(true)
                    .copyResultTags()
                    .build();
            Map<String,Object> extras = new HashMap<>();
            extras.putAll(crawlerRequestRecord.getHttpRequest().getExtras());
            extras.put("replyFloor",floor);
            subCommentRecord.getHttpRequest().setExtras(extras);
            parsedLinks.add(subCommentRecord);
        }
    }

    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();
        String url = page.getRequest().getUrl();

        if (crawlerResultTags.hasDataType(article)) {
            crawlerDataList.add(washArticle(crawlerRecord, page));
        }

        if (crawlerResultTags.hasDataType(interaction)) {
            crawlerDataList.add(washInteraction(crawlerRecord, page));
        }

        if (crawlerResultTags.hasDataType(comment)) {
            if(url.matches(COMMENT_URL)){
                crawlerDataList.addAll(washComment(crawlerRecord, page));
            }else if(url.matches(SUB_COMMENT_URL)){
                crawlerDataList.addAll(washSubComment(crawlerRecord,page));
            }
        }

        return crawlerDataList;
    }

    private CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        String title = httpPage.getHtml().xpath("//h1/span/text()").get();
        String author = httpPage.getHtml().xpath("//a[@class=\"up-name\"]/text()").get();
        String authorId = httpPage.getHtml().xpath("//a[@class=\"up-name\"]/@href").get();
        authorId = StringUtils.isNotBlank(authorId) ? authorId.substring(authorId.lastIndexOf("/") + 1) : "0";
        String follows = httpPage.getHtml().xpath("//div[@class='follow-up not-follow']/text()").get();
        follows = StringUtils.isNotBlank(follows) ? washNum(follows.split(" ")[1].trim()):"0";
        String releaseTime = httpPage.getHtml().xpath("//div[@class=\"publish-time\"]/text()").get();
        releaseTime = StringUtils.isNotBlank(releaseTime) ? releaseTime.split(" ")[1] : "";
        List<String> all = httpPage.getHtml().xpath("//div[contains(@class,'description-container')]//text()").all();
        StringBuffer conents = new StringBuffer();
        for (String articleText : all) {
            conents.append(articleText).append(" ");
        }
        List<String> tagList = httpPage.getHtml().xpath("//div[@class=\"tag\"]/span/a/text()").all();
        if(tagList != null && tagList.size() > 0){
            crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type,tagList);
        }

        CrawlerData crawlerData = null;
        try {
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .url(itemUrl)
                    .releaseTime(washTime(releaseTime))
                    .addContentKV(Field_Content, conents.toString())
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Author_Follows,follows)
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return crawlerData;
    }

    private CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        String releaseTime = httpPage.getHtml().xpath("//div[@class=\"publish-time\"]/text()").get();
        releaseTime = StringUtils.isNotBlank(releaseTime) ? releaseTime.split(" ")[1] : "";
        String views = httpPage.getHtml().xpath("//span[@class=\"viewsCount\"]/text()").get();
        String danmu = httpPage.getHtml().xpath("//span[@class=\"danmuCount sp2\"]/text()").get();
        String likes = httpPage.getHtml().xpath("//span[@class=\"likeCount\"]/text()").get();
        String comments = httpPage.getHtml().xpath("//div[@class=\"comment\"]/span/text()").get();
        String collections = httpPage.getHtml().xpath("//span[@class=\"collectionCount\"]/text()").get();

        CrawlerData crawlerData = null;
        try {
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                    .url(itemUrl)
                    .releaseTime(washTime(releaseTime))
                    .addContentKV(Field_I_Comments, washNum(comments))
                    .addContentKV(Field_I_Views, washNum(views))
                    .addContentKV(Field_I_Likes, washNum(likes))
                    .addContentKV(Field_I_Collection,washNum(collections))
                    .addContentKV(Field_I_Bullet_Chat,washNum(danmu))
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return crawlerData;
    }

    private List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1);

        List<String> commentList = httpPage.getJson().jsonPath($_type + ".rootComments").all();
        for (String data : commentList) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String commentId = jsonObject.getString("commentId");
            String author = jsonObject.getString("userName");
            String authorId = jsonObject.getString("userId");
            long timestamp = jsonObject.getLongValue("timestamp");
            String content = jsonObject.getString("content");
            String floor = jsonObject.getString("floor");
            String likes = jsonObject.getString("likeCount");
            String comments = jsonObject.getString("subCommentCount");

            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("comment"))
                    .releaseTime(timestamp)
                    .url(requestUrl)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Floor, floor)
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Comments, comments)
                    .build();
            crawlerDataList.add(crawlerData);
        }
        return crawlerDataList;
    }

    private List<CrawlerData> washSubComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        String replyFloor = (String)httpPage.getRequest().getExtras().get("replyFloor");
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1);

        List<String> commentList = httpPage.getJson().jsonPath($_type + ".subComments").all();
        for (String data : commentList) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String commentId = jsonObject.getString("commentId");
            String author = jsonObject.getString("userName");
            String authorId = jsonObject.getString("userId");
            long timestamp = jsonObject.getLongValue("timestamp");
            String content = jsonObject.getString("content");
            String floor = jsonObject.getString("floor");
            String likes = jsonObject.getString("likeCount");
            String replyTo = jsonObject.getString("replyTo");
            String replyToUserName = jsonObject.getString("replyToUserName");
            CrawlerData crawlerData = null;
            CrawlerDataBuilder builder = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment.enumVal(), commentId))
                    .resultLabelTag(valueOf("comment"))
                    .releaseTime(timestamp)
                    .url(requestUrl)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Floor, floor)
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_Reply_Floor, replyFloor);

            if(!"0".equals(replyTo)){
                builder.addContentKV(Field_Content,"回复@"+replyToUserName+" :"+content);
            }else{
                 builder.addContentKV(Field_Content, content);
            }
            crawlerData = builder.build();
            crawlerDataList.add(crawlerData);
        }
        return crawlerDataList;
    }

    private static String washNum(String text) {
        if (StringUtils.isBlank(text)) {
            return "0";
        }

        text = text.toLowerCase();
        if (text.contains("万")) {
            String[] split = text.split("万");
            String num = String.valueOf(Double.parseDouble(split[0].trim()) * 10000);
            return num.split("\\.")[0];
        }

        return text;
    }

    private static long washTime(String time) throws ParseException {
        if(StringUtils.isBlank(time)){
            return 0;
        }

        long timeToLong = System.currentTimeMillis();
        if(time.matches("\\d+小时前")){
            String[] split = time.split("小时前");
            timeToLong = timeToLong - Integer.parseInt(split[0]) * DateUtils.MILLIS_PER_HOUR;
        }else if(time.matches("\\d+分钟前")){
            String[] split = time.split("分钟前");
            timeToLong = timeToLong - Integer.parseInt(split[0]) * DateUtils.MILLIS_PER_MINUTE;
        }else if(time.matches("\\d+秒前")){
            String[] split = time.split("秒前");
            timeToLong = timeToLong - Integer.parseInt(split[0]) * DateUtils.MILLIS_PER_SECOND;
        }else if(time.matches("\\d{4}-\\d+-\\d+")){
            timeToLong = DateUtils.parseDate(time, "yyyy-MM-dd").getTime();
        }
        return timeToLong;
    }


    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 5) {
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord = null;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        if (crawlerRequestRecord == null) {
            return;
        }

        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        String searchSourceUrl = (String) extras.get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchSourceUrl)) {
            log.error("search kw source url can not null!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");
//            String keyword = "nike";

            try {
                String url = String.format(searchSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(requestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                turnRecord.tagsCreator().bizTags().addKeywords(keyword);
                turnRecord.getHttpRequest().addExtra("keyword",keyword);
                crawlerRecords.add(turnRecord);
//                return;
            } catch (UnsupportedEncodingException e) {
                log.error(e.getMessage());
            }
        }
    }
}
