package com.chance.cc.crawler.development.scripts.xchuxing;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.comment;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static org.apache.commons.lang3.time.DateUtils.*;

/**
 * @Author: ZhaoHhuan on 2021/11/11 10:50
 * @Email: 18638575967@163.com
 * @Description:
 *      Crawler script for xchuxing.com (新出行): keyword search pages, article/ins detail pages and comment feeds.
**/
public class XChuXingCrawlerScript extends CrawlerCommonScript {

    private static Logger log = LoggerFactory.getLogger(XChuXingCrawlerScript.class);
    // Crawler domain identifier; also used in record/data ids and the retry tag name.
    private static final String DOMAIN = "xchuxing";
    // Expected value of the business "site" tag; checked in crawlerCheck().
    private static final String SITE = "searchKw";
    // Business-tag key counting download retries per URL (see requestAgainCrawlerRecord()).
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";
    // Extras key holding the keyword-search URL template (see initKeyword()).
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";
    private static final String PREFIX = "https://www.xchuxing.com";

    // URL patterns routing pages to handlers in parseLinks().
    private static final String SOURCE_URL =  "https://www.xchuxing.com/";
    private static final String SEARCH_URL = "http[s]*://www.xchuxing.com/community/\\d+\\?page=\\d+\\S*";
    private static final String ITEM_URL = "https://www.xchuxing.com/[A-Za-z]+/\\d+";
    private static final String COMMENT_URL = "https://www.xchuxing.com/comments/\\d+/\\d+/\\d+\\?page=\\d+";

    // Template: /comments/<itemId>/<feedType>/1?page=1 — feed type "1" is used for
    // pages with a cate-tag, "4" otherwise (see itemUrlRecord()).
    private static final String COMMENT_SOURCE_URL = "https://www.xchuxing.com/comments/%s/%s/1?page=1";
    /**
     * Returns the crawler domain identifier used for script routing.
     *
     * @return the constant domain name "xchuxing"
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers the URL regexes that route requests into this script:
     * the site root, community search pages, item detail pages and comment feeds.
     */
    @Override
    public void initUrlRegulars() {
        for (String regular : new String[]{SOURCE_URL, SEARCH_URL, ITEM_URL, COMMENT_URL}) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input check: only records whose business "site" tag equals {@link #SITE}
     * enter this script.
     *
     * @param crawlerRequestRecord incoming request record
     * @return true when the record's site tag matches this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String siteTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        // Constant-first equals avoids an NPE when the "site" tag is absent (null).
        return SITE.equals(siteTag);
    }

    /**
     * Builds the initial request list: every support-source record whose URL
     * contains "keys" is expanded into keyword search requests via initKeyword().
     *
     * @param requestRecord        main request carrying the search URL template
     * @param supportSourceRecords auxiliary records (keyword lists, etc.)
     * @return the generated crawl records
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> records = new ArrayList<>();
        for (CrawlerRequestRecord sourceRecord : supportSourceRecords) {
            if (sourceRecord.getHttpRequest().getUrl().contains("keys")) {
                initKeyword(requestRecord, sourceRecord, records);
            }
        }
        return records;
    }

    /**
     * Dispatches a downloaded page to the matching link parser based on its URL.
     * Failed downloads (transport failure or non-200 status) are re-queued via
     * requestAgainCrawlerRecord() and skip washing.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             downloaded page
     * @return newly discovered requests (items, comment feeds, next pages)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        boolean downloadOk = httpPage.isDownloadSuccess() && httpPage.getStatusCode() == 200;
        if (!downloadOk) {
            log.error("url [{}] page download error!will retry", requestUrl);
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        if (requestUrl.matches(SEARCH_URL)) {
            searchUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(ITEM_URL)) {
            itemUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(COMMENT_URL)) {
            commentUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        return parsedLinks;
    }

    /**
     * Search/community page handler. The first page (?page=1) is served as HTML
     * and parsed with XPath; subsequent pages are JSON responses. One item
     * request is emitted per entry, and a turn-page request is appended using
     * the "last_order" cursor extracted from the current page.
     *
     * @param crawlerRequestRecord request that produced this page
     * @param httpPage             downloaded search page (HTML or JSON)
     * @param parsedLinks          output list the new requests are added to
     */
    private void searchUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        try{
            String requestUrl = httpPage.getRequest().getUrl();
            // pages[0] = URL prefix up to "page=", pages[1] = page number (plus cursor on JSON pages).
            String[] pages = requestUrl.split("page=");
            int page = 1;
            String lastOrder = "";
            if(requestUrl.matches("http[s]*://www.xchuxing.com/community/\\d+\\?page=1")){
                // First page: plain HTML list.
                page = Integer.parseInt(pages[1]) + 1;
                lastOrder = httpPage.getHtml().xpath("//div[@class=\"content\"]/dl/@data-order").get();

                List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"content\"]/dl/div").nodes();
                for (Selectable node : nodes) {
                    String itemUrl = node.xpath("./dd/a/@href|./dd/div[@class=\"article\"]/a/@href").get();
                    // TODO: the xpath below cannot extract the value, hence the regex/split fallback
//                    String releaseTime = node.xpath("./dt/a/p/span").get();
                    String releaseTime = washContent("<span>(.*)?</span>",node.all().get(1)).split("<span>")[1].split("</span>")[0];
                    if(StringUtils.isBlank(releaseTime) || StringUtils.isBlank(itemUrl)){
                        continue;
                    }else{
                        // Release time may carry a " · " separated suffix; keep only the time part.
                        releaseTime = releaseTime.contains(" · ") ? releaseTime.split(" · ")[0] : releaseTime;
                        itemUrl = PREFIX + itemUrl;
                    }

                    // Same separator can appear HTML-escaped.
                    releaseTime = releaseTime.contains("&amp;middot;") ? releaseTime.split("&amp;middot;")[0] : releaseTime;
                    long releaseTimeToLong = washTime(releaseTime);
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(itemUrl)
                            .releaseTime(releaseTimeToLong)
                            .copyBizTags()
                            .copyResultTags()
                            .build();
                    parsedLinks.add(itemRecord);
                }
            }else{
                // Later pages: JSON payload; the page number precedes the "&last_order=" cursor.
                String[] split = pages[1].split("&last_order=");
                page = Integer.parseInt(split[0]) + 1;
                lastOrder =  httpPage.getJson().jsonPath($_type + ".data.pages.last_order").get();

                List<String> all = httpPage.getJson().jsonPath($_type + ".data.data").all();
                for (String s : all) {
                    JSONObject jsonObject = JSONObject.parseObject(s);
                    String itemId = jsonObject.getString("object_id");
                    String type = jsonObject.getString("type");
                    String releaseTime = jsonObject.getString("created_at");
                    String itemUrl  = "";
                    if(StringUtils.isBlank(itemId) || StringUtils.isBlank(type) || StringUtils.isBlank(releaseTime)){
                        continue;
                    }else{
                        // Type "4" entries live under /ins/, everything else under /article/.
                        if(type.equals("4")){
                            itemUrl = PREFIX + "/ins/" + itemId;
                        }else{
                            itemUrl = PREFIX + "/article/" + itemId;
                        }
                        // created_at looks like a unix timestamp in seconds; "000" appended
                        // to convert to milliseconds — TODO confirm against live payload.
                        long releaseTimeToLong = Long.parseLong(releaseTime + "000");
                        CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                                .itemPageRequest(crawlerRequestRecord)
                                .httpUrl(itemUrl)
                                .releaseTime(releaseTimeToLong)
                                .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                                .copyBizTags()
                                .copyResultTags()
                                .build();
                        parsedLinks.add(itemRecord);
                    }
                }
            }

            if(page == 1 || StringUtils.isBlank(lastOrder)){
                log.error("next page get error!currentUrl is [{}]",requestUrl);
                return;
            }else{
                // Turn to the next page using the extracted "last_order" cursor.
                String nextUrl = pages[0] + "page=" + page + "&last_order=" + lastOrder;
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(nextUrl)
                        .releaseTime(System.currentTimeMillis())
                        .httpHead("X-Requested-With","XMLHttpRequest")
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                turnRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
                parsedLinks.add(turnRecord);
            }
        }catch (Exception e){
            log.error(e.getMessage());
        }
    }

    /**
     * Item detail page handler. Error pages are skipped. When the record expects
     * comment data and the page reports a non-zero comment count, the first
     * comment-feed request is enqueued (feed type "1" when the page carries a
     * cate-tag, otherwise "4").
     *
     * @param crawlerRequestRecord request that produced this page
     * @param httpPage             downloaded item page
     * @param parsedLinks          output list the comment request is added to
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        // The trailing URL segment is the item id, reused to build the comment-feed URL.
        String keyword = requestUrl.substring(requestUrl.lastIndexOf("/") + 1);
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        // Error pages render a <div class="error"><span>…</span></div> message.
        String isContain = httpPage.getHtml().xpath("//div[@class=\"error\"]/span").get();
        if(StringUtils.isNotBlank(isContain)){
            log.error("[{}] {}",requestUrl,isContain);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        if (resultTags.hasDataType(comment)) {
            resultTags.getCategoryTag().removeLabelTag("comment");

            // The filter record is carried along so comment requests inherit filtering state.
            CrawlerRequestRecord filterRecord = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class);
            if (filterRecord == null) {
                log.error("filter record can not null !");
                return;
            }

            // The "相关评论" (related comments) header carries the total comment count.
            String span = httpPage.getHtml().xpath("//h2[text()='相关评论']/span").get();
            if(StringUtils.isNotBlank(span)){
                String num = washContent("\\d+", span);
                if(StringUtils.isNotBlank(num) && Integer.parseInt(num) > 0){
                    // Pages with a cate-tag use feed type "1"; others use "4".
                    String isArticle = httpPage.getHtml().xpath("//em[@class=\"cate-tag\"]").get();
                    String commentUrl = StringUtils.isNotBlank(isArticle) ? String.format(COMMENT_SOURCE_URL,keyword,"1") : String.format(COMMENT_SOURCE_URL,keyword,"4");
                    CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(commentUrl)
                            .releaseTime(System.currentTimeMillis())
                            .httpHead("X-Requested-With","XMLHttpRequest")
                            .notFilterRecord()
                            .copyBizTags()
                            .needWashed(true)
                            .resultLabelTag(comment)
                            .build();
                    commentRecord.setFilter(filterRecord.getFilter());
                    commentRecord.setFilterInfos(filterRecord.getFilterInfos());
                    // Remember the originating article URL for washComment().
                    commentRecord.getHttpRequest().addExtra("articleUrl", requestUrl);
                    parsedLinks.add(commentRecord);
                }else{
                    log.error("article [{}] comment is 0!",requestUrl);
                }
            }
        }
    }

    /**
     * Comment feed handler: enqueues the next comment page by incrementing the
     * "page" query parameter of the current URL, preserving headers and extras.
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String currentUrl = httpPage.getRequest().getUrl();
        String nextPageUrl = getNextUrl(currentUrl, null, "page");

        CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();
        nextPageRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(nextPageRecord);
    }

    /**
     * Extracts structured data from a washed page: one CrawlerData per result
     * type (article / interaction / comment) tagged on the record.
     *
     * @param crawlerRecord the request record carrying result-type tags
     * @param page          the downloaded page
     * @return extracted data records
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();
        List<CrawlerData> results = new ArrayList<>();

        if (resultTags.hasDataType(article)) {
            results.add(washArticle(crawlerRecord, page));
        }
        if (resultTags.hasDataType(interaction)) {
            results.add(washInteraction(crawlerRecord, page));
        }
        if (resultTags.hasDataType(comment)) {
            results.addAll(washComment(crawlerRecord, page));
        }

        return results;
    }

    /**
     * Washes an article/ins detail page into an article CrawlerData: title,
     * author (name / id / fan count), release time and concatenated body text.
     *
     * @param crawlerRequestRecord request that produced this page
     * @param httpPage             downloaded item page
     * @return the built CrawlerData, or null when the release time cannot be parsed
     */
    private CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        String title = httpPage.getHtml().xpath("//h1/text()").get();
        String author = httpPage.getHtml().xpath("//aside//h2").get();
        String authorId = httpPage.getHtml().xpath("//aside//a/@href").get();
        authorId = StringUtils.isBlank(authorId) ? "" : authorId.substring(authorId.lastIndexOf("/") + 1);

        // The fan count sits in the sidebar <li> whose text node is "粉丝" (followers).
        String fans = "0";
        List<Selectable> nodes = httpPage.getHtml().xpath("//aside//li").nodes();
        for (Selectable node : nodes) {
            List<String> all = node.xpath("./text()").all();
            if (all.contains("\n粉丝\n")) {
                fans = node.xpath("./em/text()").get();
                break;
            }
        }

        String isArticle = httpPage.getHtml().xpath("//em[@class=\"cate-tag\"]").get();
        String releaseTime = httpPage.getHtml().xpath("//h1/div[@class=\"flex-middle\"]/span/text()|//a[@class=\"flex-middle\"]/span/span/text()").get();
        // Guard against a missing node: xpath get() may return null, and the
        // unconditional "&amp;middot;" check would then throw an NPE.
        if (releaseTime != null && releaseTime.contains("&amp;middot;")) {
            releaseTime = releaseTime.split("&amp;middot;")[0];
        }

        List<String> all = httpPage.getHtml().xpath("//div[@class=\"content-main\"]/div//p/text()|//div[@class=\"ins-content\"]//text()").all();
        // StringBuilder: no synchronization needed in this single-threaded wash.
        StringBuilder contents = new StringBuilder();
        for (String articleText : all) {
            contents.append(articleText).append(" ");
        }

        CrawlerData crawlerData = null;
        try {
            long releaseTimeToLong = washTime(releaseTime);
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    // Use the enum constant directly instead of valueOf("article").
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_Content, contents.toString())
                    .addContentKV(Field_Title, StringUtils.isBlank(title) ? "" : title)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Author_Fans, fans)
                    .build();
            crawlerData.tagsCreator().bizTags().addCustomKV("isArticle", StringUtils.isNotBlank(isArticle) ? "true" : "false");
        } catch (ParseException e) {
            // Keep the stack trace; e.getMessage() alone hides the failure location.
            log.error("failed to parse release time [{}] for [{}]", releaseTime, itemUrl, e);
        }
        return crawlerData;
    }

    /**
     * Washes an item page into an interaction CrawlerData (like count and
     * comment count), parented to the corresponding article data id.
     *
     * @param crawlerRequestRecord request that produced this page
     * @param httpPage             downloaded item page
     * @return the built CrawlerData, or null when the release time cannot be parsed
     */
    private CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        String releaseTime = httpPage.getHtml().xpath("//h1/div[@class=\"flex-middle\"]/span/text()|//a[@class=\"flex-middle\"]/span/span/text()").get();
        // Guard against a missing node: xpath get() may return null, and the
        // unconditional "&amp;middot;" check would then throw an NPE.
        if (releaseTime != null && releaseTime.contains("&amp;middot;")) {
            releaseTime = releaseTime.split("&amp;middot;")[0];
        }

        String likes = httpPage.getHtml().xpath("//li[@class=\"like\"]/span").get();
        // Comment count comes from the "相关评论" (related comments) header; default 0.
        String span = httpPage.getHtml().xpath("//h2[text()='相关评论']/span").get();
        String comments = "0";
        if (StringUtils.isNotBlank(span)) {
            String num = washContent("\\d+", span);
            if (StringUtils.isNotBlank(num)) {
                comments = num;
            }
        }

        CrawlerData crawlerData = null;
        try {
            long releaseTimeToLong = washTime(releaseTime);
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                    // Use the enum constant directly instead of valueOf("interaction").
                    .resultLabelTag(interaction)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_I_Comments, comments)
                    .addContentKV(Field_I_Likes, washNum(likes))
                    .build();
        } catch (ParseException e) {
            // Keep the stack trace; e.getMessage() alone hides the failure location.
            log.error("failed to parse release time [{}] for [{}]", releaseTime, itemUrl, e);
        }
        return crawlerData;
    }

    /**
     * Washes a comment JSON page: emits CrawlerData for each top-level comment
     * and for every nested reply beneath it. The parent article id comes from
     * the "articleUrl" extra set by itemUrlRecord().
     *
     * @param crawlerRequestRecord request that produced this page
     * @param httpPage             downloaded comment JSON page
     * @return one content record plus one interaction record per comment/reply
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> results = new ArrayList<>();
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1);

        for (String data : httpPage.getJson().jsonPath($_type + ".data").all()) {
            JSONObject commentJson = JSONObject.parseObject(data);
            washCommentOrReply(crawlerRequestRecord, httpPage, results, articleKey, commentJson);

            if (commentJson.getJSONArray("reply") != null) {
                for (JSONObject replyJson : commentJson.getJSONArray("reply").toJavaList(JSONObject.class)) {
                    washCommentOrReply(crawlerRequestRecord, httpPage, results, articleKey, replyJson);
                }
            }
        }
        return results;
    }

    /**
     * Converts one comment (or nested reply) JSON object into two CrawlerData
     * entries: the comment content itself, plus an interaction record holding
     * the comment's like and reply counts.
     *
     * @param crawlerRequestRecord request that produced the page
     * @param httpPage             downloaded comment JSON page
     * @param crawlerDataList      output list both records are added to
     * @param articleKey           id of the parent article
     * @param jsonObject           one comment or reply JSON object
     */
    private void washCommentOrReply(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,List<CrawlerData> crawlerDataList,String articleKey,JSONObject jsonObject){
        String requestUrl = httpPage.getRequest().getUrl();
        JSONObject author = jsonObject.getJSONObject("author");
        String username = author.getString("username");
        String userId = author.getString("id");
        String fans = author.getString("fans");
        String focus = author.getString("focus");// number of accounts the author follows
        String releaseTime = jsonObject.getString("relativeTime");
        String content = jsonObject.getString("content");
        String commentId = jsonObject.getString("id");


        String likes = jsonObject.getString("liketimes");
        String comments = jsonObject.getString("commentnum");

        try{
            // Content record: parented under the article data id.
            CrawlerData commentArticle = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(),commentId))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("comment"))
                    .releaseTime(washTime(releaseTime))
                    .url(requestUrl)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .addContentKV(Field_Author, username)
                    .addContentKV(Field_Author_Id, userId)
                    .addContentKV(Field_Author_Fans, fans)
                    .addContentKV(Field_Attention_Number,focus)
                    .addContentKV(Field_Content, content)
                    .build();
            crawlerDataList.add(commentArticle);


            // Interaction record (likes / reply count): parented under the
            // comment data id built above.
            CrawlerData commentInteraction = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(),interaction.enumVal(),commentId))
                    .resultLabelTag(interaction)
                    .releaseTime(washTime(releaseTime))
                    .url(requestUrl)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Comments, comments)
                    .build();
            crawlerDataList.add(commentInteraction);
        }catch (Exception e){
            log.error(e.getMessage());
        }
    }

    /**
     * Normalizes a count string scraped from the page: blank/null becomes "0",
     * values suffixed with "w"/"W" (万, ten-thousands) are expanded to plain
     * integers, anything else is returned unchanged.
     *
     * @param text raw count text, may be null or blank
     * @return normalized numeric string
     */
    private static String washNum(String text) {
        if (text == null || text.trim().isEmpty()) {
            return "0";
        }

        text = text.toLowerCase();
        if (text.contains("w")) {
            // "1.2w" means 1.2 * 10000. Casting to long truncates the fraction the
            // same way the previous decimal-point split did, but without the
            // String.valueOf(double) round-trip, which emitted scientific notation
            // for values >= 1e7 (e.g. "1000w" -> "1.0E7" -> wrong result "1").
            double value = Double.parseDouble(text.split("w")[0].trim()) * 10000;
            return String.valueOf((long) value);
        }

        return text;
    }

    /**
     * Post-execution hook; intentionally a no-op for this script.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-queues a failed download, up to 5 attempts. The retry count is stored
     * in the business tag {@link #REQUEST_AGAIN_TAG}; the retry record carries
     * over the original request's headers, extras and wash/parse flags.
     *
     * @param crawlerRequestRecords output list the retry record is added to
     * @param crawlerRecord         the record whose download failed
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            // Give up after 5 retries for the same URL.
            if (count >= 5) {
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // Choose the retry record type from the "turn_page_item_request" label:
        // absent -> rebuilt as a turn-page request, present -> as an item request.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord = null;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        if (crawlerRequestRecord == null) {
            return;
        }

        // Carry over request state and bump the retry counter on the new record.
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Expands the keyword list downloaded by the support-source record into one
     * search request per keyword. The search URL is built from the
     * "searchKwSourceUrl" template in the main record's extras, with the keyword
     * URL-encoded into it.
     *
     * @param requestRecord       main request carrying the search URL template
     * @param supportSourceRecord record whose internal page holds the keyword JSON
     * @param crawlerRecords      output list the search requests are added to
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        String searchSourceUrl = (String) extras.get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchSourceUrl)) {
            log.error("search kw source url can not null!");
            return;
        }
        // The keyword service signals success via a "msg" field in the JSON body.
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");

            try {
                // URL-encode the keyword before substituting it into the template.
                String url = String.format(searchSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(requestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                turnRecord.tagsCreator().bizTags().addKeywords(keyword);
                turnRecord.getHttpRequest().addExtra("keyword",keyword);
                crawlerRecords.add(turnRecord);
            } catch (UnsupportedEncodingException e) {
                log.error(e.getMessage());
            }
        }
    }


    /**
     * Builds the next-page URL: the numeric value of the given page parameter is
     * incremented, the keyword parameter (when named) is re-URL-encoded, and all
     * other query parameters pass through unchanged.
     *
     * @param requestUrl current URL; must contain a "?" query string
     * @param keyword    name of the keyword parameter to re-encode, may be null
     * @param page       name of the page parameter to increment, may be null
     * @return the next-page URL
     */
    private String getNextUrl(String requestUrl, String keyword, String page) {
        String[] split = requestUrl.split("\\?");
        // StringBuilder instead of repeated String concatenation in the loop.
        StringBuilder nextUrl = new StringBuilder(split[0]).append('?');
        List<NameValuePair> params = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair pair : params) {
            String name = pair.getName();
            String value = pair.getValue();
            if (StringUtils.isNotBlank(page) && page.equals(name)) {
                nextUrl.append(name).append('=').append(Integer.parseInt(value) + 1).append('&');
            } else if (StringUtils.isNotBlank(keyword) && keyword.equals(name)) {
                try {
                    nextUrl.append(name).append('=').append(URLEncoder.encode(value, "UTF-8")).append('&');
                } catch (UnsupportedEncodingException e) {
                    // UTF-8 is always supported, but log instead of silently
                    // swallowing (the previous log call was commented out).
                    log.error("encode keyword value [{}] failed", value, e);
                }
            } else {
                nextUrl.append(name).append('=').append(value).append('&');
            }
        }
        // Drop the trailing '&'.
        return nextUrl.substring(0, nextUrl.length() - 1);
    }

    /**
     * Parses the site's assorted time formats into epoch milliseconds.
     * Handles blank input (returns 0), relative times ("刚刚" = just now,
     * "N秒前" = N seconds ago, "N分钟前" = N minutes ago, "N小时前" = N hours ago),
     * "MM月dd日[HH:mm]" with the current year assumed, plain ISO dates, and a
     * list of absolute date-time patterns as fallback.
     *
     * @param time raw time text; all spaces are stripped before matching
     * @return epoch milliseconds, or 0 for blank input
     * @throws ParseException when no known pattern matches
     */
    private static long washTime(String time) throws ParseException {
        long releaseTimeToLong = 0;
        if (StringUtils.isBlank(time)) {
            return releaseTimeToLong;
        }
        // All patterns below assume spaces have been removed.
        time = time.replace(" ","");

        // "MM月dd日" carries no year; prepend the current year before parsing.
        if (time.matches("\\d{2}月\\d{2}日") || time.matches("\\d{2}月\\d{2}日\\d{2}:\\d{2}")) {
            time = LocalDate.now().getYear() + time;
        }

        long currentTime = System.currentTimeMillis();
        if ("刚刚".equals(time)) {
            // "just now"
            releaseTimeToLong = currentTime;
        }else if(time.matches("\\d+秒前")){
            // "N seconds ago"
            String num = time.split("秒")[0];
            releaseTimeToLong = currentTime - Integer.parseInt(num) * MILLIS_PER_SECOND;
        } else if (time.matches("\\d+分钟前")) {
            // "N minutes ago"
            String num = time.split("分钟")[0];
            releaseTimeToLong = currentTime - Integer.parseInt(num) * MILLIS_PER_MINUTE;
        } else if (time.matches("\\d+小时前")) {
            // "N hours ago"
            String num = time.split("小时")[0];
            releaseTimeToLong = currentTime - Integer.parseInt(num) * MILLIS_PER_HOUR;
        } else if (time.matches("\\d{4}-\\d{2}-\\d{2}")) {
            releaseTimeToLong = DateUtils.parseDate(time, "yyyy-MM-dd").getTime();
        }else{
            // Fallback: try the absolute date-time patterns (space-free forms).
            return DateUtils.parseDate(time, "yyyy-MM-ddHH:mm:ss", "发表于 yyyy/MM/ddHH:mm", "yyyyMM月dd日", "yyyy-MM-ddHH:mm", "yyyyMM月dd日HH:mm", "日期：yyyy-MM-dd", "yyyy年MM月dd日HH:mm",
                    "时间：yyyy-MM-ddHH:mm:ss").getTime();
        }
        return releaseTimeToLong;

    }

    /**
     * Returns the first substring of the input matching the given regex, or
     * null when nothing matches.
     *
     * @param regx  regular expression to search for
     * @param input text to scan
     * @return first matched substring, or null
     */
    private static String washContent(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        // Only the first match is ever needed: the original while-loop returned
        // on its first iteration and its accumulator list was never used.
        return matcher.find() ? matcher.group(0) : null;
    }
    /**
     * Ad-hoc manual check that a sample comment URL matches {@link #COMMENT_URL}.
     */
    public static void main(String[] args) {
        String sampleUrl = "https://www.xchuxing.com/comments/56661/4/1?page=1";
        boolean matched = sampleUrl.matches(COMMENT_URL);
        System.out.println(matched);

    }
}
