package com.chance.cc.crawler.development.scripts.hualongxiang;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.Header;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static org.apache.commons.lang3.time.DateUtils.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/2/24 9:49
 * @Description Crawler script for the Hualongxiang (化龙巷) community forum
 **/
public class HualongxiangCrawlerScript extends CrawlerCommonScript {

    private static Logger log = LoggerFactory.getLogger(HualongxiangCrawlerScript.class);
    // Business-tag key counting how many times a failed download has been retried.
    private static final String REQUEST_AGAIN_TAG = "hualongxiang_request_again";

    // Search entrance plus the regex matching any search-result page under it.
    private static final String SEARCH_ENTRANCE_URL = "http://so.hualongxiang.com/";
    private static final String SEARCH_URL = "http://so.hualongxiang.com/\\S+";

    // Fixed query-string token appended to requests; presumably part of the site's
    // "security verify" anti-bot handshake — TODO confirm against live traffic.
    private static final String HUALONGXIANG_SUFFIX = "security_verify_data=313533362c383634";
    private static final String HUALONGXIANG_PRIFIX = "http://so.hualongxiang.com";
    // Item (thread) pages, bare and with the verify token attached.
    private static final String ITEM_ENTRNCE_URL = "http://www.hualongxiang.com/[a-z]*/\\d+";
    private static final String ITEM_URL = "http://www.hualongxiang.com/[a-z]*/\\d+\\?"+HUALONGXIANG_SUFFIX;
    // Comment pages: entry URLs, token-bearing URLs, and pagination URLs.
    private static final String COMMENT_PRIFIX = "http://www.hualongxiang.com/";
    private static final String COMMENT_ENTRANCE_URL = "http://www.hualongxiang.com/read.php\\?tid=\\d+&pd=\\S*&page=\\d+#comment|http://www.hualongxiang.com/[a-z]*/\\d+#comment";
    private static final String COMMENT_URL = "http://www.hualongxiang.com/\\S*" + HUALONGXIANG_SUFFIX + "#comment";
    private static final String TURN_COMMENT_URL = "http://www.hualongxiang.com/read.php\\?tid=\\d+&pd=\\S*&page=\\S*#comment";

    /**
     * The domain key that routes records to this script.
     *
     * @return the fixed domain identifier "hualongxiang"
     */
    @Override
    public String domain() {
        return "hualongxiang";
    }

    /**
     * Registers the URL patterns this script accepts: the search entrance and
     * result pages, item detail pages, and comment pages (with and without the
     * anti-bot verify token).
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(SEARCH_ENTRANCE_URL);
        addUrlRegular(SEARCH_URL);
        addUrlRegular(ITEM_ENTRNCE_URL);
        addUrlRegular(ITEM_URL);
        addUrlRegular(COMMENT_ENTRANCE_URL);
        addUrlRegular(COMMENT_URL);
    }

    /**
     * Input gate deciding whether a record may enter this script.
     * Every record is accepted — no pre-filtering is needed for this site.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return always {@code true}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }

    /**
     * Seeds the crawl: support records whose URL carries a "keys" parameter are
     * expanded into one search request per keyword.
     *
     * @param requestRecord        the driving request record
     * @param supportSourceRecords auxiliary records (keyword feeds)
     * @return the generated search requests
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> seededRecords = new ArrayList<>();
        for (CrawlerRequestRecord supportRecord : supportSourceRecords) {
            String supportUrl = supportRecord.getHttpRequest().getUrl();
            if (!supportUrl.contains("keys")) {
                continue;
            }
            initKeyword(requestRecord, supportRecord, seededRecords);
        }
        return seededRecords;
    }

    /**
     * Dispatches a downloaded page to the matching link parser, or schedules a
     * retry when the download failed.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return follow-up requests discovered on the page
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<CrawlerRequestRecord>();

        if (!httpPage.isDownloadSuccess()) {
            log.error("hualongxiang page download error!will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            // A failed page carries no content worth washing.
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        if (requestUrl.matches(SEARCH_URL)) {
            // Search-result listing.
            searchUrlRecord(crawlerRequestRecord,httpPage,parsedLinks);
        }else if(requestUrl.matches(ITEM_ENTRNCE_URL) || requestUrl.matches(ITEM_URL) || requestUrl.matches(COMMENT_ENTRANCE_URL) || requestUrl.matches(COMMENT_URL)){
            // Item detail or comment page; both run the anti-bot handshake.
            itemRecord(crawlerRequestRecord,httpPage,parsedLinks);
        }

        return parsedLinks;
    }


    /**
     * Parses a search-result page: queues the next result page (if any) and one
     * item-page request per search hit.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded search-result page
     * @param parsedLinks          output list the new requests are appended to
     */
    private void searchUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Pagination: follow the "下一页" (next page) link when present.
        String nextPage = httpPage.getHtml().xpath("//a[text()='下一页']/@href").get();
        if (StringUtils.isNotBlank(nextPage)) {
            nextPage = HUALONGXIANG_PRIFIX + StringEscapeUtils.unescapeHtml(nextPage);
            CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPage)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            parsedLinks.add(turnRecord);
        }

        // Result list: each <li> holds one item link plus its release time.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"shopper-list-long\"]/ul/li").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./div[@class=\"title\"]/a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }

            // The release time is the second <span> of the "time" div. Guard the
            // index access: the original blind get(1) threw
            // IndexOutOfBoundsException on rows with fewer than two spans.
            List<String> timeSpans = node.xpath("./div[@class=\"time\"]/span/text()").all();
            if (timeSpans.size() < 2) {
                continue;
            }
            String releaseTime = timeSpans.get(1);
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }
            releaseTime = releaseTime.replace("-\n","\n").trim();

            long releaseTimeToLong = washTime(releaseTime);
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .needWashed(false)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            parsedLinks.add(itemRecord);
        }
    }

    /**
     * Handles an item or comment page response and drives the site's
     * "security verify" handshake.
     *
     * NOTE(review): the number of Set-Cookie response headers appears to tell a
     * verification interstitial (fewer than two) apart from a real page (two or
     * more) — inferred from the size check below; confirm against live traffic.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @param parsedLinks          output list for follow-up requests
     */
    private void itemRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks){
        Header[] responseHeaders = httpPage.getResponseHeaders();
        String requestUrl = httpPage.getRequest().getUrl();
        // Collects one entry per Set-Cookie header; the list only ever holds the
        // literal string "Set-Cookie", so its size is what matters below.
        List<String> names = new ArrayList<>();
        for (Header responseHeader : responseHeaders) {
            if(!responseHeader.getName().equals("Set-Cookie")){
                continue;
            }
            names.add(responseHeader.getName());
        }

        if(names.size() > 1){
            // Real page obtained: wash it and fan out follow-up requests.
            crawlerRequestRecord.setNeedWashPage(true);
            if(requestUrl.matches(ITEM_URL) || requestUrl.matches(ITEM_ENTRNCE_URL)){
                if(requestUrl.matches(ITEM_URL)){
                    // Strip the verify token so the stored URL is the clean item URL.
                    crawlerRequestRecord.getHttpRequest().setUrl(requestUrl.split("\\?")[0]);
                }
                itemCommentRecord(crawlerRequestRecord,httpPage,parsedLinks);
            }else if(requestUrl.matches(TURN_COMMENT_URL)){
                commentUrlRecord(crawlerRequestRecord,httpPage,parsedLinks);
            }
            return;
        }

        // Verification interstitial: rebuild the URL with the verify token
        // attached so the retried request can pass the check.
        String url = "";
        if(requestUrl.matches(ITEM_ENTRNCE_URL)){
            url = StringUtils.joinWith("?",requestUrl,HUALONGXIANG_SUFFIX);
        }else if(requestUrl.matches(COMMENT_ENTRANCE_URL)){
            url = requestUrl.contains("?") ? StringUtils.joinWith("&", requestUrl.split("#")[0],HUALONGXIANG_SUFFIX) : StringUtils.joinWith("?", requestUrl.split("#")[0],HUALONGXIANG_SUFFIX);
            url = url + "#comment";
        }else{
            url = requestUrl;
        }

        CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(url)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .build();
        itemRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());

        // Equivalent to !names.isEmpty(): merge the verify cookies the
        // interstitial just set into the Cookie header of the retried request.
        if(names.contains("Set-Cookie")){
            String cookie = crawlerRequestRecord.getHttpRequest().getHeaders().get("Cookie");
            for (Header responseHeader : responseHeaders) {
                String name = responseHeader.getName();
                String value = responseHeader.getValue();
                if(!"Set-Cookie".equals(name)){
                    continue;
                }
                // Keep only the name=value pair; drop cookie attributes.
                value = value.split(";")[0];
                if(value.contains("security_session_verify=")){
                    cookie = value;
                }else if(value.contains("security_session_mid_verify=")){
                    cookie = StringUtils.joinWith(";",cookie,value);
                }
            }
            itemRecord.getHttpRequest().addHeader("Cookie",cookie);
        }
        parsedLinks.add(itemRecord);
    }

    /**
     * After an item page is fetched, queues a request for the LAST page of its
     * comments (when the thread has any), carrying over the filter state stored
     * in the result tags.
     *
     * @param crawlerRequestRecord the record for the item page
     * @param httpPage             the downloaded item page
     * @param parsedLinks          output list for follow-up requests
     */
    private void itemCommentRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks){
        String requestUrl = httpPage.getRequest().getUrl();
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if(resultTags.hasDataType(comment)){
            // Consume the comment tag here; the queued record re-adds it below.
            resultTags.getCategoryTag().removeLabelTag("comment");

            CrawlerRequestRecord filterRecord = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class);
            if(filterRecord == null){
                log.error("filter record can not null !");
                return;
            }

            String comments = httpPage.getHtml().xpath("//em[text()='回复']/following-sibling::span/text()").get();
            // Only bother when the reply count is positive.
            if(StringUtils.isNotBlank(comments) && Integer.parseInt(comments) > 0){
                // Find the last comment page: if the trailing pager link is
                // "下一页" (next), the last page number sits one link earlier.
                String text = httpPage.getHtml().xpath("//div[@class=\"page pagenumber\"]/a[last()]/text()").get();
                String lastUrl = "";
                if(StringUtils.isNotBlank(text)){
                    if(text.equals("下一页")){
                        lastUrl = httpPage.getHtml().xpath("//div[@class=\"page pagenumber\"]/a[last()-1]/@href").get();
                    }else if(text.matches("\\S*\\d+\\S*")){
                        lastUrl = httpPage.getHtml().xpath("//div[@class=\"page pagenumber\"]/a[last()]/@href").get();
                    }
                }
                if(StringUtils.isNotBlank(lastUrl)){
                    lastUrl = COMMENT_PRIFIX + StringEscapeUtils.unescapeHtml(lastUrl) + "#comment";
                }else{
                    // Single comment page: the item URL itself serves as the comment page.
                    lastUrl = requestUrl + "#comment";
                }
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(lastUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .resultLabelTag(comment)
                        .build();
                commentRecord.setFilter(filterRecord.getFilter());
                commentRecord.setFilterInfos(filterRecord.getFilterInfos());
                // Remember the owning article so washComment can build parent ids.
                commentRecord.getHttpRequest().addExtra("articleUrl",requestUrl);
                parsedLinks.add(commentRecord);
            }
        }
    }

    /**
     * Pages backwards through comment pages: queues the page just before the
     * current one (comments are crawled from the last page towards the first).
     *
     * @param crawlerRequestRecord the record for the current comment page
     * @param httpPage             the downloaded comment page
     * @param parsedLinks          output list for follow-up requests
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks){
        // The <a> immediately preceding the bold current-page marker, i.e. the
        // previous page; absent on the first page, which ends the walk.
        String url = httpPage.getHtml().xpath("//div[@class=\"page pagenumber\"]/b/preceding-sibling::a[1]/@href").get();
        if(StringUtils.isBlank(url)){
            return;
        }else{
            url = COMMENT_PRIFIX + StringEscapeUtils.unescapeHtml(url) + "#comment";
        }

        String lastReleaseTime = httpPage.getHtml().xpath("//div[@class=\"read_t\"]/table[1]//p[@class=\"s6\"]").get();
        try {
            long longTime = DateUtils.parseDate(lastReleaseTime,"发表于 yyyy-MM-dd HH:mm").getTime();
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(url)
                    .releaseTime(longTime)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            // Preserve extras (e.g. articleUrl) across the pagination hop.
            commentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
            parsedLinks.add(commentRecord);
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
    }

    /**
     * Washes a downloaded page into structured data, emitting one result per
     * data type present in the record's result tags.
     *
     * @param crawlerRecord the record that produced the page
     * @param page          the downloaded page
     * @return article, interaction and/or comment data extracted from the page
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        CrawlerResultTags tags = crawlerRecord.tagsCreator().resultTags();
        List<CrawlerData> results = new ArrayList<>();

        if (tags.hasDataType(article)) {
            results.add(washArticle(crawlerRecord, page));
        }
        if (tags.hasDataType(interaction)) {
            results.add(washInteraction(crawlerRecord, page));
        }
        if (tags.hasDataType(comment)) {
            results.addAll(washComment(crawlerRecord, page));
        }

        return results;
    }

    /**
     * Extracts the article (title, author, body text) from an item page.
     *
     * @param crawlerRequestRecord the record that produced the page
     * @param httpPage             the downloaded item page
     * @return the article data, or null when the release time fails to parse
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // Trailing path segment is the thread id, used as the data key.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        String title = httpPage.getHtml().xpath("//div[@class=\"title_name\"]/h2/a/text()").get();
        String author = httpPage.getHtml().xpath("//div[@class=\"read_t\"]/table[1]//span[@class=\"b fl\"]/text()").get();
        String releaseTime = httpPage.getHtml().xpath("//div[@class=\"read_t\"]/table[1]//div/p[@title]/text()").get();
        List<String> all = httpPage.getHtml().xpath("//div[@class=\"read_t\"]/table[1]//div[@id=\"read_tpc\"]//text()").all();
        // StringBuilder: local accumulation needs no synchronization.
        StringBuilder contents = new StringBuilder();
        for (String articleText : all) {
            // Skip blanks and inline script fragments ("window.open" snippets);
            // the original's extra isNotBlank re-check was redundant.
            if (StringUtils.isBlank(articleText) || articleText.contains("window.open")) {
                continue;
            }
            contents.append(articleText).append(" ");
        }

        CrawlerData crawlerData = null;
        try {
            long releaseTimeToLong = StringUtils.isNotEmpty(releaseTime) ? DateUtils.parseDate(releaseTime, "发表于：yyyy-MM-dd HH:mm").getTime() : crawlerRequestRecord.getReleaseTime();
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    // Use the static-imported enum constant directly instead of valueOf("article").
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_Content, contents.toString())
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            // Keep the stack trace and context; e.getMessage() alone loses both.
            log.error("hualongxiang article time parse failed, url {}", itemUrl, e);
        }
        return crawlerData;
    }

    /**
     * Extracts interaction metrics (view count, reply count) from an item page.
     *
     * @param crawlerRequestRecord the record that produced the page
     * @param httpPage             the downloaded item page
     * @return the interaction data, or null when the release time fails to parse
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // Trailing path segment is the thread id; ties the data to its article.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        String views = httpPage.getHtml().xpath("//em[text()='阅读']/following-sibling::span/text()").get();
        views = StringUtils.isNotBlank(views) ? views : "0";
        String comments = httpPage.getHtml().xpath("//em[text()='回复']/following-sibling::span/text()").get();
        comments = StringUtils.isNotBlank(comments) ? comments : "0";
        String releaseTime = httpPage.getHtml().xpath("//div[@class=\"read_t\"]/table[1]//div/p[@title]/text()").get();

        try {
            // Primitive long: the boxed Long the original used only added autoboxing.
            long releaseTimeToLong = StringUtils.isNotEmpty(releaseTime) ? DateUtils.parseDate(releaseTime, "发表于：yyyy-MM-dd HH:mm").getTime() : crawlerRequestRecord.getReleaseTime();
            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                    // Use the static-imported enum constant directly instead of valueOf("interaction").
                    .resultLabelTag(interaction)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_I_Comments, comments)
                    .addContentKV(Field_I_Views, views)
                    .build();
        } catch (ParseException e) {
            // Keep the stack trace and context; e.getMessage() alone loses both.
            log.error("hualongxiang interaction time parse failed, url {}", itemUrl, e);
        }
        return null;
    }

    /**
     * Extracts comments (floor posts) from a comment page, iterating floors
     * from the bottom of the page upwards.
     *
     * @param crawlerRequestRecord the record that produced the page
     * @param httpPage             the downloaded comment page
     * @return parsed comments; empty when the articleUrl extra is missing
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        // The owning article URL is stashed as an extra by itemCommentRecord.
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        if (StringUtils.isBlank(articleUrl)) {
            // Previously an unguarded NullPointerException when the extra was absent.
            log.error("hualongxiang comment wash missing articleUrl extra, url {}", requestUrl);
            return crawlerDataList;
        }
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1);

        List<Selectable> nodes = httpPage.getHtml().xpath("//table[@class=\"floot\"]").nodes();
        for (int i = nodes.size() - 1; i >= 0; i--) {
            Selectable node = nodes.get(i);
            String author = node.xpath(".//span[@class=\"b fl\"]/text()").get();
            List<String> all = node.xpath(".//div[@class=\"fs16 mb10\"]/div[@class=\"read-content\"]//text()").all();
            // StringBuilder: local accumulation needs no synchronization.
            StringBuilder contents = new StringBuilder();
            for (String commentText : all) {
                if (StringUtils.isBlank(commentText)) {
                    continue;
                }
                contents.append(commentText).append(" ");
            }
            String releaseTime = node.xpath(".//div/p[@title]/text()|.//div/p[@class=\"s6\"]/text()").get();
            // Some pages carry floors without a timestamp, e.g.
            // http://www.hualongxiang.com/maifang/15523889
            // http://www.hualongxiang.com/haodaifu/15523418
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }
            // The first three floors use pet names (沙发/板凳/地板); normalize to "N楼".
            String floor = node.xpath(".//a[@class=\"s2 b cp fs18\"]/text()").get();
            if ("沙发".equals(floor)) {
                floor = "1楼";
            } else if ("板凳".equals(floor)) {
                floor = "2楼";
            } else if ("地板".equals(floor)) {
                floor = "3楼";
            }
            if (StringUtils.isBlank(floor)) {
                continue;
            }
            String commentId = node.xpath(".//a[@name]/@name").get();
            try {
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                        // Use the static-imported enum constant directly instead of valueOf("comment").
                        .resultLabelTag(comment)
                        .releaseTime(DateUtils.parseDate(releaseTime.trim(), "发表于 yyyy-MM-dd HH:mm").getTime())
                        .url(requestUrl)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Content, contents.toString())
                        .addContentKV(Field_Floor, floor)
                        .build();
                crawlerDataList.add(crawlerData);
            } catch (Exception e) {
                // One bad floor shouldn't abort the rest; log with stack trace.
                log.error("hualongxiang comment wash failed, url {}", requestUrl, e);
            }
        }
        return crawlerDataList;
    }

    /**
     * Post-execution hook; this script needs no cleanup, so it is a no-op.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-queues a failed download, giving up after five attempts. The retry
     * count travels in the business tags under {@code REQUEST_AGAIN_TAG}.
     *
     * @param crawlerRequestRecords output list the retry record is appended to
     * @param crawlerRecord         the record whose download failed
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            // Hard cap: drop the record after five retries.
            if (count >= 5) {
                log.error("hualongxiang download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // Rebuild the retry as the same request type the original record carried.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord = null;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    // Vary the record key per attempt so retries aren't deduped away.
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        if (crawlerRequestRecord == null) {
            return;
        }

        // Carry over request state so the retry repeats the exact same fetch.
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Expands a keyword-feed support record into one search request per keyword.
     * The keyword list arrives as JSON; each keyword is URL-encoded into the
     * {@code searchKwSourceUrl} template taken from the request extras.
     *
     * @param requestRecord       the driving request carrying the URL template
     * @param supportSourceRecord the support record holding the keyword JSON
     * @param crawlerRecords      output list for the generated search requests
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        String sourceUrl = (String) requestRecord.getHttpRequest().getExtras().get("searchKwSourceUrl");
        if (StringUtils.isEmpty(sourceUrl)) {
            log.error("sourceUrl can not null !");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");

            String itemUrl = null;
            try {
                // The template's %s placeholder receives the URL-encoded keyword.
                itemUrl = String.format(sourceUrl, URLEncoder.encode(keyword, "UTF-8"));
            } catch (UnsupportedEncodingException e) {
                log.error(e.getMessage());
            }
            CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(requestRecord)
                    .httpUrl(itemUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            crawlerRecords.add(turnRecord);
        }
    }

    /**
     * Normalizes the site's relative time strings ("N秒前"/"N分钟前"/"N小时前",
     * "昨天 HH:mm:ss", "前天 HH:mm:ss", "yyyy-MM-dd HH:mm:ss") to epoch millis.
     * Returns 1L when the input is blank or unparseable.
     *
     * @param time the raw time string scraped from the page
     * @return epoch milliseconds, or 1L on failure
     */
    private static long washTime(String time) {
        long timeToLong = 1;
        if (StringUtils.isBlank(time)) {
            return timeToLong;
        }

        try {
            // Relative formats: now minus the stated offset.
            if (time.contains("秒前")) {
                return System.currentTimeMillis() - Integer.parseInt(time.split("秒前")[0]) * MILLIS_PER_SECOND;
            } else if (time.contains("分钟前")) {
                return System.currentTimeMillis() - Integer.parseInt(time.split("分钟前")[0]) * MILLIS_PER_MINUTE;
            } else if (time.contains("小时前")) {
                return System.currentTimeMillis() - Integer.parseInt(time.split("小时前")[0]) * MILLIS_PER_HOUR;
            }

            // "昨天"/"前天": substitute the concrete ISO date before parsing.
            // LocalDate.toString() is already yyyy-MM-dd, so the original's
            // SimpleDateFormat parse/format round-trip was a no-op.
            if (time.contains("昨天")) {
                time = time.replace("昨天", LocalDate.now().minusDays(1).toString());
            } else if (time.contains("前天")) {
                time = time.replace("前天", LocalDate.now().minusDays(2).toString());
            }
            timeToLong = DateUtils.parseDate(time, "yyyy-MM-dd HH:mm:ss").getTime();
        } catch (ParseException | NumberFormatException e) {
            // NumberFormatException from Integer.parseInt was previously uncaught
            // and could abort the whole parse pass on a malformed prefix.
            log.error("hualongxiang time wash failed for [{}]", time, e);
        }
        return timeToLong;
    }

    /**
     * Returns the first match of {@code regx} in {@code input}, or null when
     * there is none.
     *
     * @param regx  the regular expression to search for
     * @param input the text to search
     * @return the first matched substring, or null
     */
    private static String getString(String regx, String input) {
        // The original looped but always returned on the first hit and kept an
        // unused url list; a find()-guarded return is the honest shape.
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group(0) : null;
    }

    /**
     * Ad-hoc smoke check: prints the washTime result for each supported
     * time-string format.
     */
    public static void main(String[] args) {
        List<String> samples = Arrays.asList("2分钟前","1小时前","昨天 23:28:21","前天 15:24:15","2021-02-23 20:50:26");
        samples.forEach(sample -> System.out.println(washTime(sample)));
    }
}
