package com.chance.cc.crawler.development.scripts.jiemian;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.time.LocalDate;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Topic_Type;
import static org.apache.commons.lang3.time.DateUtils.*;

/**
 * 界面新闻 (Jiemian News) crawler script: collects articles, interaction
 * statistics and comments from jiemian.com, driven by keyword searches.
 *
 * @author Zhao.Hhuan
 * @since 2020/12/4 12:58
 */
public class JiemianCrawlerScript extends CrawlerCommonScript {

    /** Logger for this script; static final per SLF4J convention (one logger per class, not per instance). */
    private static final Logger log = LoggerFactory.getLogger(JiemianCrawlerScript.class);
    // KV-tag key marking a record scheduled for re-download after a failed fetch.
    // NOTE(review): the "cctv_" prefix looks copy-pasted from another site's script — confirm it is intended.
    private static final String REQUEST_AGAIN_TAG = "cctv_request_again";
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";

    // Site entrance page.
    private static final String ENTRANCE_URL = "https://www.jiemian.com/";

    // Section (module) pages: entrances, listings, and the JSON endpoints backing them.
    private static final String MODULES_ENTRANCE_URL = "https://www.jiemian.com/lists/\\d+.html#|https://www.jiemian.com/city/main/\\d+.html#";
    private static final String MODULES_URL = "https://www.jiemian.com/(lists|city/index)/\\d+.html";
    private static final String MODULES_JSON_URL = "https://a.jiemian.com/index.php\\?m=lists\\S*|https://papi.jiemian.com/page/api/officialAccount/getArticleByOaid\\S*";

    private static final String MODULES_ENTRANCE_CITY_URL = "https://www.jiemian.com/city/main/\\d+.html";

    // "Kuaixun" (flash news) listing page and its paging JSON API.
    private static final String MODULES_KUAIXUN_URL = "https://www.jiemian.com/lists/\\d+kb.html";
    private static final String MODULES_JSON_KUAIXUN_SOURCE_URL = "https://papi.jiemian.com/page/api/kuaixun/getlistmore?cid=%s&start_time=%s&page=2&tagid=%s";
    private static final String MODULES_JSON_KUAIXUN_URL = "https://papi.jiemian.com/page/api/kuaixun/getlistmore\\S*";
    private static final String ARTICLE_SOURCE_URL = "https://www.jiemian.com/article/%s.html";

    private static final String MODULES_TAG_URL = "https://www.jiemian.com/tags/\\d+/\\d+.html";

    // Article detail page plus its interaction (likes/views) and comment JSONP endpoints.
    private static final String ITEM_URL = "https://www.jiemian.com/article/\\d+.html";
    private static final String INTERACTION_SOURCE_URL = "https://a.jiemian.com/index.php?m=article&a=getArticleP&aid=%s";
    private static final String INTERACTION_URL = "https://a.jiemian.com/index.php\\?m=article\\S*";
    private static final String COMMENT_SOURCE_URL = "https://a.jiemian.com/index.php?m=comment&a=getlistCommentP&page=1&comment_type=1&per_page=10&aid=%s";
    private static final String COMMENT_URL = "https://a.jiemian.com/index.php\\?m=comment\\S*";

    // Keyword search entrance and its turn-page URL pattern.
    private static final String ENTRANCE_KEYWORD_URL = "https://a.jiemian.com/index.php";
    private static final String TURN_PAGE_URL = "https://a.jiemian.com/index.php\\?m=search\\S*";
    /**
     * Domain key identifying this script within the crawler framework.
     *
     * @return the fixed domain string {@code "jiemian"}
     */
    @Override
    public String domain() {
        final String domainKey = "jiemian";
        return domainKey;
    }

    /**
     * Registers every URL pattern that routes a request into this script.
     */
    @Override
    public void initUrlRegulars() {
        String[] urlRegulars = {
                ENTRANCE_URL,
                MODULES_ENTRANCE_URL,
                MODULES_URL,
                MODULES_JSON_URL,
                MODULES_ENTRANCE_CITY_URL,
                MODULES_KUAIXUN_URL,
                MODULES_JSON_KUAIXUN_URL,
                MODULES_TAG_URL,
                ITEM_URL,
                INTERACTION_URL,
                COMMENT_URL,
                ENTRANCE_KEYWORD_URL,
                TURN_PAGE_URL
        };
        for (String regular : urlRegulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input check: only records whose business-tag site ends with "Kw"
     * (keyword-search records) are handled by this script.
     *
     * @param crawlerRequestRecord incoming request record
     * @return {@code true} when this script should process the record
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().site();
        // Null-safe: a missing site tag means "not ours" instead of an NPE.
        return site != null && site.endsWith("Kw");
    }

    /**
     * Seeds crawl records from the supporting source records; only source
     * URLs carrying a "keys" parameter (keyword searches) are expanded.
     *
     * @param requestRecord        the driving request record
     * @param supportSourceRecords candidate source records to seed from
     * @return records created for each keyword source
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> seededRecords = new ArrayList<>();
        for (CrawlerRequestRecord sourceRecord : supportSourceRecords) {
            String sourceUrl = sourceRecord.getHttpRequest().getUrl();
            if (!sourceUrl.contains("keys")) {
                continue;
            }
            initKeyword(requestRecord, sourceRecord, seededRecords);
        }
        return seededRecords;
    }

    /**
     * Dispatches a downloaded page to the link-extraction routine matching
     * its URL pattern, after handling download failures and bad statuses.
     *
     * @param crawlerRequestRecord record that produced this page
     * @param httpPage             downloaded page (may have failed)
     * @return newly discovered requests (or a retry of this one)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        // Download failure or a 503 (throttled): schedule a retry and skip washing.
        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() == 503) {
            log.error("jiemian page download error!will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        // 503 was already handled above, so only other non-200 statuses remain.
        if (httpPage.getStatusCode() != 200) {
            log.error("jiemian{}", httpPage.getStatusCode());
            return parsedLinks;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        // The page downloaded successfully, so clear any pending retry marker.
        Map<String, KVTag> kvTags = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags();
        if (kvTags != null) {
            kvTags.remove(REQUEST_AGAIN_TAG);
        }
        if (requestUrl.matches(ENTRANCE_URL)) {
            listRecord(crawlerRequestRecord, httpPage, parsedLinks, "//li[@cid]//a/@href");
        } else if (requestUrl.matches(MODULES_ENTRANCE_URL)) {
            listRecord(crawlerRequestRecord, httpPage, parsedLinks,
                    "//div[@id=\"header-nav\"]//a/@href|" +
                            "//ul[@class=\"local-hot-nav\"]//a/@href|" +
                            "//div[@class=\"hot-city\"]//a/@href|" +
                            "//div[@class=\"local-nav-wrapper\"]//a/@href");
            // Fallback: some entrance layouts use a different navigation structure.
            if (parsedLinks.size() < 1) {
                listRecord(crawlerRequestRecord, httpPage, parsedLinks,
                        "//nav[@class=\"l-nav\"]//a/@href|" +
                                "//div[@class=\"navbar-btn js-nav-hover\"]//div[@class=\"nav-drop-down\"]//a/@href");
            }
        } else if (requestUrl.matches(MODULES_ENTRANCE_CITY_URL)) {
            listRecord(crawlerRequestRecord, httpPage, parsedLinks,
                    "//ul[@id=\"localList\"]/li/a/@href|//li[@class=\"list-item\"]/a/@href");
        } else if (requestUrl.matches(MODULES_URL) || requestUrl.matches(MODULES_TAG_URL)) {
            moduleUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(MODULES_KUAIXUN_URL)) {
            moduleKuaixunUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(MODULES_JSON_URL)) {
            moduleJsonUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(MODULES_JSON_KUAIXUN_URL)) {
            moduleKuaixunJsonUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(ITEM_URL)) {
            articleUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(COMMENT_URL)) {
            commentUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(TURN_PAGE_URL)) {
            keywordUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        return parsedLinks;
    }
    /**
     * Routes the downloaded page through the wash routines selected by the
     * record's result data-type tags (article / interaction / comment).
     *
     * @param crawlerRecord record being processed
     * @param page          downloaded page
     * @return washed data items (one per matched type; comments may yield several)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();
        List<CrawlerData> washed = new ArrayList<>();

        if (resultTags.hasDataType(article)) {
            washed.add(washArticle(crawlerRecord, page));
        }
        if (resultTags.hasDataType(interaction)) {
            washed.add(washInteraction(crawlerRecord, page));
        }
        if (resultTags.hasDataType(comment)) {
            washed.addAll(washComment(crawlerRecord, page));
        }

        return washed;
    }

    /**
     * Washes an article detail page into a {@link CrawlerData} item, extracting
     * title, authors, source, release time, body text, topic tags and image URLs.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded article page
     * @return the washed article, or {@code null} when the page's release time
     *         fails to parse (callers currently add the result to a list as-is)
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String itemUrl = httpPage.getRequest().getUrl();
        // Article id is the file name between the last '/' and the ".html" suffix.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        String title = httpPage.getHtml().xpath("//h1/text()").get();
        List<String> authorAll = httpPage.getHtml().xpath("//span[@class=\"author\"]/a//text()").all();
        // StringBuilder: single-threaded build-up, no need for StringBuffer's synchronization.
        StringBuilder authorBuffer = new StringBuilder();
        for (String data : authorAll) {
            authorBuffer.append(data).append(" ");
        }

        // The info line mixes a source entry ("来源：xxx") and a "yyyy/MM/dd HH:mm" timestamp.
        List<String> all = httpPage.getHtml().xpath("//div[@class=\"article-info\"]//text()").all();
        String source = "";
        String releaseTime = "";
        for (String data : all) {
            if (data.matches("来源：\\S*")) {
                source = data.split("：")[1];
            }

            String string = getString("\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}", data);
            if (StringUtils.isNotEmpty(string)) {
                releaseTime = string;
            }
        }

        List<String> articleTextList = httpPage.getHtml().xpath("//div[@class=\"article-content\"]//p//text()").all();
        StringBuilder contents = new StringBuilder();
        for (String articleText : articleTextList) {
            // Skip empty fragments and the boilerplate disclaimer paragraph.
            if (StringUtils.isEmpty(articleText) || articleText.contains("免责声明：")) {
                continue;
            }
            contents.append(articleText).append(" ");
        }

        List<String> tags = httpPage.getHtml().xpath("//div[@class=\"main-mate\"]//a/text()").all();
        List<String> imgs = httpPage.getHtml().xpath("//div[@class=\"article-content\"]//img/@src").all();

        CrawlerData crawlerData = null;
        try {
            // Fall back to the record's own release time when the page shows none.
            Long releaseTimeToLong = StringUtils.isNotEmpty(releaseTime) ? DateUtils.parseDate(releaseTime, "yyyy/MM/dd HH:mm").getTime() : crawlerRequestRecord.getReleaseTime();
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_Content, contents.toString())
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, authorBuffer.toString())
                    .addContentKV(Field_Source, source)
                    .addContentKV(Field_Images, imgs.toString())
                    .build();
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, tags);
        } catch (ParseException e) {
            // Parameterized logging; the null result is returned deliberately (see Javadoc).
            log.error("时间格式错误，文章(“{}”)时间是：{}", itemUrl, releaseTime);
        }
        return crawlerData;
    }

    /**
     * Washes the interaction (JSONP statistics) response for an article into a
     * {@link CrawlerData} carrying comment / like / collect / view counts.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded JSONP response
     * @return the washed interaction item
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String requestUrl = httpPage.getRequest().getUrl();
        // Article id is the value after the final '=' (the aid parameter).
        String articleKey = requestUrl.substring(requestUrl.lastIndexOf("=") + 1);

        // Strip the jsonpReturn(...) padding and pull the statistics object.
        String statsJson = new Json(httpPage.getRawText()).removePadding("jsonpReturn").jsonPath($_type + ".tongjiarr").get();
        JSONObject stats = JSONObject.parseObject(statsJson);
        // Absent or empty counters default to "0".
        String comments = StringUtils.defaultIfEmpty(stats.getString("count"), "0");
        String likes = StringUtils.defaultIfEmpty(stats.getString("ding"), "0");
        String collection = StringUtils.defaultIfEmpty(stats.getString("collect"), "0");
        String views = StringUtils.defaultIfEmpty(stats.getString("hit"), "0");

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), articleKey))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .url(requestUrl)
                .addContentKV(Field_I_Likes, likes)
                .addContentKV(Field_I_Comments, comments)
                .addContentKV(Field_I_Collection, collection)
                .addContentKV(Field_I_Views, washNum(views))
                .build();
    }

    /**
     * Washes the comment JSONP response for an article. The payload's "rs" field
     * carries escaped HTML; each comment node yields two items: the comment itself
     * and an interaction record with its reply / like / dislike counts.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded JSONP response
     * @return washed comment and per-comment interaction items (empty when "rs" is absent)
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        // Article id is the value after the final '=' (the aid parameter).
        String articleKey = requestUrl.substring(requestUrl.lastIndexOf("=") + 1);

        String rs = httpPage.getJson().removePadding("jsonpReturn").jsonPath($_type + ".rs").get();
        if (StringUtils.isEmpty(rs)) {
            return crawlerDataList;
        }

        Html html = new Html(StringEscapeUtils.unescapeJava(rs));
        List<Selectable> nodes = html.xpath("//dd[@class=\"comment-post\"]").nodes();
        for (Selectable node : nodes) {
            String author = node.xpath(".//div[@class=\"comment-body\"]/a/text()").get();
            String content = node.xpath(".//div[@class=\"comment-body\"]/div[@class=\"comment-main\"]/p/text()").get();
            String releaseTime = node.xpath(".//span[@class=\"date\"]/text()").get();

            String commentId = node.xpath("./@id").get();
            try {
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("comment"))
                        .releaseTime(wordToDate(releaseTime))
                        .url(crawlerRequestRecord.getHttpRequest().getUrl())
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Content, content)
                        .build();
                crawlerDataList.add(crawlerData);

                // Fix: scope count lookups to the current node (".//") like the fields
                // above; a bare "//" risks matching the first comment's counts for all.
                // Counts render wrapped in one leading and one trailing character
                // (presumably parentheses) which substring(1, len-1) strips —
                // NOTE(review): confirm against the live markup.
                String comments = node.xpath(".//span[@class=\"comment\"]/em/text()").get();
                comments = StringUtils.isBlank(comments) ? "0" : comments.substring(1, comments.length() - 1);
                String disLikes = node.xpath(".//span[@class=\"discomment\"]/em/text()").get();
                disLikes = StringUtils.isBlank(disLikes) ? "0" : disLikes.substring(1, disLikes.length() - 1);
                String likes = node.xpath(".//span[@class=\"like\"]/em/text()").get();
                likes = StringUtils.isBlank(likes) ? "0" : likes.substring(1, likes.length() - 1);
                CrawlerData crawlerDataInteraction = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), interaction, commentId))
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                        .releaseTime(wordToDate(releaseTime))
                        .url(crawlerRequestRecord.getHttpRequest().getUrl())
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_I_Comments, comments)
                        .addContentKV(Field_I_Likes, likes)
                        .addContentKV(Field_I_Dislikes, disLikes)
                        .build();
                crawlerDataList.add(crawlerDataInteraction);
            } catch (ParseException e) {
                log.error("时间格式错误！时间是：{}", releaseTime);
            }
        }
        return crawlerDataList;
    }

    /**
     * Post-execution hook; this script needs no cleanup, so it is a no-op.
     *
     * @param crawlerRecordContext context of the finished crawl (unused)
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Extracts navigation links via the given XPath and queues each as a
     * turn-page request. Links found on the entrance page get a trailing "#"
     * (and an absolute host for relative city links) so they match the
     * module-entrance URL patterns.
     *
     * @param crawlerRequestRecord source record
     * @param httpPage             downloaded page
     * @param parsedLinks          output list to append new requests to
     * @param xpath                XPath expression selecting link hrefs
     */
    private void listRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, String xpath) {
        boolean fromEntrance = httpPage.getRequest().getUrl().matches(ENTRANCE_URL);
        for (String href : httpPage.getHtml().xpath(xpath).all()) {
            String targetUrl = href;
            if (fromEntrance) {
                if (href.matches("/city/main/\\d+.html")) {
                    targetUrl = "https://www.jiemian.com" + href + "#";
                } else {
                    targetUrl = href + "#";
                }
            }
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(targetUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build());
        }
    }

    /**
     * Parses a module (section) listing page: queues the JSON "load more"
     * endpoint for paging, then an item request per article entry.
     *
     * @param crawlerRequestRecord source record
     * @param httpPage             downloaded listing page
     * @param parsedLinks          output list to append new requests to
     */
    private void moduleUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String pageUrl = httpPage.getRequest().getUrl();
        // Paging: the "load more" widget (or last pager entry) carries the JSON feed URL.
        String moreUrl = httpPage.getHtml().xpath("//div[@class=\"load-more\"]/@url|//div[@class=\"list-pager\"]/ul/li[last()]/a/@href|//div[@class=\"page-more\"]/@url").get();
        if (StringUtils.isNotEmpty(moreUrl)) {
            String unescaped = StringEscapeUtils.unescapeHtml(moreUrl);
            // Plain module listings need an explicit page=2 appended; tag pages do not.
            String turnUrl = pageUrl.matches(MODULES_URL) ? unescaped + "&page=2" : unescaped;
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(turnUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build());
        }

        for (Selectable node : httpPage.getHtml().xpath("//div[@class=\"news-right\"]|//div[@class=\"item-info\"]|//div[@class=\"text-news\"]").nodes()) {
            String itemUrl = node.xpath(".//div[@class=\"news-header\"]/h3/a/@href|./a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }

            String releaseTime = node.xpath(".//span[@class=\"date\"]/text()").get();
            if (StringUtils.isBlank(releaseTime)) {
                // Some layouts put the date in a plain <p>; take the first non-blank one.
                for (String candidate : node.xpath("./p/text()").all()) {
                    if (StringUtils.isNotBlank(candidate)) {
                        releaseTime = candidate.trim();
                        break;
                    }
                }
            }
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }

            try {
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(wordToDate(releaseTime))
                        .copyBizTags()
                        .copyResultTags()
                        .build());
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }
    }

    /**
     * Parses a keyword-search result page: validates the hit count (first page
     * only), queues the next results page, and creates an item request per
     * article found.
     *
     * @param crawlerRequestRecord record that produced the page (carries the "keyword" extra)
     * @param httpPage             downloaded search result page
     * @param parsedLinks          output list to append new requests to
     */
    private void keywordUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String keyword = (String) crawlerRequestRecord.getHttpRequest().getExtras().get("keyword");
        String requestUrl = httpPage.getRequest().getUrl();
        if (!requestUrl.contains("page")) {
            // First page: the active tab shows "新闻(<count>)"; bail out on zero hits.
            String result = httpPage.getHtml().xpath("//a[@class=\"active\"]/text()").get();
            if (StringUtils.isBlank(result)) {
                log.error("{}-热门词汇", keyword);
                return;
            }

            if ("新闻(0)".equals(result)) {
                log.error("{} search result is 0!", keyword);
                return;
            }
        }

        // Turn page: the last pager entry points at the next results page.
        String nextUrl = httpPage.getHtml().xpath("//div[@class=\"list-pager\"]/ul/li[last()]/a/@href").get();
        if (StringUtils.isNotEmpty(nextUrl)) {
            nextUrl = StringEscapeUtils.unescapeHtml(nextUrl);
            CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .copyBizTags()
                    .copyResultTags()
                    .releaseTime(System.currentTimeMillis())
                    .build();
            // Carry the keyword extras forward so later pages know their query.
            turnRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
            parsedLinks.add(turnRecord);
        }

        // Article detail pages.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[contains(@class,\"news-view\")]").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath(".//div[@class=\"news-header\"]/h3/a/@href").get();
            if (StringUtils.isEmpty(itemUrl)) {
                continue;
            }

            // Fix: select the date's text node; the previous element selection
            // returned <span> markup that could never parse as "yyyy/MM/dd HH:mm".
            String releaseTime = node.xpath(".//span[@class=\"date\"]/text()").get();
            if (StringUtils.isEmpty(releaseTime)) {
                continue;
            }

            try {
                long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy/MM/dd HH:mm").getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        // Keyword participates in the record key so the same article
                        // can be collected once per keyword.
                        .recordKey(itemUrl + keyword)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();

                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }
    }

    /**
     * Parses a module JSON response: queues the next page of the same feed
     * (with "page" incremented), then extracts article links from the HTML
     * fragment embedded in the JSON under ".rst".
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded JSON page
     * @param parsedLinks          output list to append new requests to
     */
    private void moduleJsonUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();

        // Rebuild the query string, bumping only the "page" parameter.
        String[] urlParts = requestUrl.split("\\?");
        StringBuilder nextUrlBuilder = new StringBuilder(urlParts[0]).append("?");
        for (NameValuePair pair : URLEncodedUtils.parse(urlParts[1], Charset.defaultCharset())) {
            String value = "page".equals(pair.getName())
                    ? String.valueOf(Integer.parseInt(pair.getValue()) + 1)
                    : pair.getValue();
            nextUrlBuilder.append(pair.getName()).append("=").append(value).append("&");
        }
        // Drop the trailing '&'.
        String nextUrl = nextUrlBuilder.substring(0, nextUrlBuilder.length() - 1);
        parsedLinks.add(CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build());

        // The article list arrives as an HTML fragment inside the JSON payload.
        Html html = new Html(httpPage.getJson().removePadding("").jsonPath($_type + ".rst").get());
        for (Selectable node : html.xpath("//div[@class=\"news-right\"]").nodes()) {
            String itemUrl = node.xpath(".//div[@class=\"news-header\"]/h3/a/@href").get();
            String releaseTime = node.xpath(".//span[@class=\"date\"]/text()").get();
            if (StringUtils.isEmpty(itemUrl) || StringUtils.isEmpty(releaseTime)) {
                continue;
            }

            try {
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(wordToDate(releaseTime))
                        .copyBizTags()
                        .copyResultTags()
                        .build());
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }
    }

    /**
     * Parses the kuaixun (flash news) listing page: derives the paging JSON URL
     * from inline script variables (cid / tagid) plus the oldest visible item's
     * timestamp, then queues an item request per listed entry.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded listing page
     * @param parsedLinks          output list to append new requests to
     */
    private void moduleKuaixunUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String rawText = httpPage.getRawText();
        String cidExpr = getString("cid = '\\S*'", rawText);
        String tagidExpr = getString("tagid = '\\d+'", rawText);
        if (StringUtils.isNotEmpty(cidExpr) && StringUtils.isNotEmpty(tagidExpr)) {
            // Extract the quoted values, e.g. cid = '123' -> 123.
            String cid = cidExpr.substring(cidExpr.indexOf("'") + 1, cidExpr.lastIndexOf("'"));
            String tagid = tagidExpr.substring(tagidExpr.indexOf("'") + 1, tagidExpr.lastIndexOf("'"));
            String startTime = httpPage.getHtml().xpath("(//div[@class=\"item-news a-item\"])[last()]/@data-time").get();

            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(String.format(MODULES_JSON_KUAIXUN_SOURCE_URL, cid, startTime, tagid))
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build());
        }

        for (Selectable node : httpPage.getHtml().xpath("//div[@class=\"item-news a-item\"]").nodes()) {
            String itemUrl = node.xpath("./div[@class=\"item-main\"]//a/@href").get();
            String releaseTime = node.xpath("./@data-time").get();
            if (StringUtils.isEmpty(itemUrl) || StringUtils.isEmpty(releaseTime)) {
                continue;
            }

            // data-time is epoch seconds; appending "000" converts to milliseconds.
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(itemUrl)
                    .releaseTime(Long.parseLong(releaseTime + "000"))
                    .copyBizTags()
                    .copyResultTags()
                    .build());
        }
    }

    /**
     * Parses the kuaixun (flash news) paging JSON: queues the next page using
     * the last entry's publish time as the new start_time, then creates an
     * article-detail request per entry.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded JSON page
     * @param parsedLinks          output list to append new requests to
     */
    private void moduleKuaixunJsonUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        List<String> all = httpPage.getJson().jsonPath($_type + ".result.list").all();
        // Guard: an empty result list means the feed is exhausted — previously
        // this threw on all.get(all.size() - 1).
        if (all == null || all.isEmpty()) {
            return;
        }

        // Turn page: bump "page" and move "start_time" to the oldest entry's publish time.
        String[] split = requestUrl.split("\\?");
        StringBuilder nextUrlBuilder = new StringBuilder(split[0]).append("?");
        String publishtime = JSONObject.parseObject(all.get(all.size() - 1)).getString("publishtime");
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair nameValuePair : parse) {
            if ("page".equals(nameValuePair.getName())) {
                nextUrlBuilder.append(nameValuePair.getName()).append("=").append(Integer.parseInt(nameValuePair.getValue()) + 1).append("&");
            } else if ("start_time".equals(nameValuePair.getName())) {
                nextUrlBuilder.append(nameValuePair.getName()).append("=").append(publishtime).append("&");
            } else {
                nextUrlBuilder.append(nameValuePair.getName()).append("=").append(nameValuePair.getValue()).append("&");
            }
        }
        // Drop the trailing '&'.
        String nextUrl = nextUrlBuilder.substring(0, nextUrlBuilder.length() - 1);
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parsedLinks.add(turnRecord);

        // Detail pages.
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String id = jsonObject.getString("id");
            if (StringUtils.isEmpty(id)) {
                continue;
            }

            String releaseTime = jsonObject.getString("publishtime");
            if (StringUtils.isEmpty(releaseTime)) {
                continue;
            }

            // publishtime is epoch seconds; appending "000" converts to milliseconds.
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(String.format(ARTICLE_SOURCE_URL, id))
                    .releaseTime(Long.parseLong(releaseTime + "000"))
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            parsedLinks.add(itemRecord);
        }
    }

    // Fans an article detail page out into its interaction and comment API
    // requests, removing each data-type label once its follow-up request is
    // queued so the article record itself is not washed again for that type.
    private void articleUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,List<CrawlerRequestRecord> parsedLinks){
        String requestUrl = httpPage.getRequest().getUrl();
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        // Article id is the file name between the last '/' and the ".html" suffix.
        String id = requestUrl.substring(requestUrl.lastIndexOf("/") + 1, requestUrl.lastIndexOf("."));
        if (resultTags.hasDataType(interaction)) {
            String url = String.format(INTERACTION_SOURCE_URL, id);

            CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(url)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .copyBizTags()
                    .resultLabelTag(interaction)
                    .build();
            parsedLinks.add(interactionRecord);
            // Drop the label so this record is no longer treated as an interaction source.
            crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().getLabelTags().remove("interaction");
        }

        if(resultTags.hasDataType(comment)){
            CrawlerRecord commentFilter = null;
            // The comment request inherits its de-dup filter from a pre-registered
            // record stored under the "comment_filter_record" tag; abort without it.
            if((commentFilter = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record",CrawlerRequestRecord.class)) == null){
                log.error("comment filter can not null!");
                return ;
            }
            String url = String.format(COMMENT_SOURCE_URL, id);

            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(url)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .notFilterRecord()
                    .needWashed(true)
                    .resultLabelTag(comment)
                    .build();
            // Transplant the filter state so comment de-duplication carries over.
            commentRecord.setFilterInfos(commentFilter.getFilterInfos());
            commentRecord.setFilter(commentFilter.getFilter());
            crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().getLabelTags().remove("comment");
            parsedLinks.add(commentRecord);
        }
    }

    /**
     * Creates the next-page request for a comment list by rebuilding the current
     * URL with its "page" parameter incremented, unless the current page is
     * already the last one reported in the response ("page_count").
     *
     * @param crawlerRequestRecord the record of the comment request being processed
     * @param httpPage             the downloaded comment page (JSONP payload)
     * @param parsedLinks          output list the next-page request is appended to
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,List<CrawlerRequestRecord> parsedLinks){
        //todo replies to comments are not collected yet
        String requestUrl = httpPage.getRequest().getUrl();
        //comment paging: rebuild the URL with "page" advanced by one
        String[] split = requestUrl.split("\\?");
        if (split.length < 2) {
            // Fix: the original indexed split[1] unconditionally and threw
            // ArrayIndexOutOfBoundsException for URLs without a query string.
            log.error("comment url [{}] has no query string, can not turn page", requestUrl);
            return;
        }
        int page = 1;
        StringBuilder nextUrl = new StringBuilder(split[0]).append('?');
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair nameValuePair : parse) {
            if("page".equals(nameValuePair.getName())){
                page = Integer.parseInt(nameValuePair.getValue());
                nextUrl.append(nameValuePair.getName()).append('=').append(page + 1).append('&');
            }else{
                nextUrl.append(nameValuePair.getName()).append('=').append(nameValuePair.getValue()).append('&');
            }
        }
        // Drop the trailing '&'.
        nextUrl.setLength(nextUrl.length() - 1);

        // Decide whether another page exists before queueing the request.
        String pageCount = httpPage.getJson().removePadding("jsonpReturn").jsonPath($_type + ".page_count").get();
        if (StringUtils.isBlank(pageCount)) {
            // Fix: the original passed a possibly-null value straight to
            // Integer.parseInt, which throws instead of stopping gracefully.
            log.error("page_count missing in comment page [{}]", requestUrl);
            return;
        }
        if(page >= Integer.parseInt(pageCount)){
            return ;
        }

        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl.toString())
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();
        parsedLinks.add(record);
    }

    /**
     * Re-queues a failed request for another download attempt, up to five retries.
     * The retry count is tracked in the REQUEST_AGAIN_TAG business tag, and the
     * record key is suffixed with the current count so the retry is not
     * deduplicated against the original request.
     *
     * @param crawlerRequestRecords output list the retry request is appended to
     * @param crawlerRecord         the request that failed and should be retried
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            // Give up after five attempts.
            if (count >= 5) {
                log.error("jiemian download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // A "turn_page_item_request" label marks the failed request as an item page;
        // without it the request is retried as a turn-page request.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord;
        if(type == null){
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }else{
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }
        // Note: the original null-check on crawlerRequestRecord here was
        // unreachable — both branches always assign a built record — so it
        // has been removed as dead code.

        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Builds search/module requests from a downloaded keyword list. For each
     * keyword in the support-source payload, formats the configured search URL
     * and tags the resulting request according to which URL pattern it matches.
     *
     * @param requestRecord       the originating request carrying SEARCH_KW_SOURCE_URL in its extras
     * @param supportSourceRecord the record whose internal download page holds the keyword JSON
     * @param crawlerRecords      output list the generated requests are appended to
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        String searchSourceUrl = (String) extras.get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchSourceUrl)) {
            log.error("search kw source url can not null!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");
            if (StringUtils.isBlank(keyword)) {
                // Fix: URLEncoder.encode(null, ...) throws NullPointerException,
                // so a missing keyword would have aborted the whole loop.
                continue;
            }

            String itemUrl = null;
            try {
                itemUrl = String.format(searchSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
            } catch (UnsupportedEncodingException e) {
                // UTF-8 is always supported; keep the stack trace instead of
                // logging only the message in the unlikely event it happens.
                log.error("encode keyword [{}] failed", keyword, e);
            }
            if(StringUtils.isBlank(itemUrl)){
                continue;
            }
            CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(requestRecord)
                    .httpUrl(itemUrl)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .releaseTime(System.currentTimeMillis())
                    .build();
            // Tag the request differently depending on which URL family it belongs to.
            if(itemUrl.matches(TURN_PAGE_URL)){
                turnRecord.tagsCreator().bizTags().addKeywords(keyword);
                turnRecord.getHttpRequest().addExtra("keyword",keyword);
            }else if(itemUrl.matches(MODULES_URL)){
                turnRecord.tagsCreator().bizTags().addCustomKV(Tag_Site_Info,keyword);
                turnRecord.tagsCreator().bizTags().addCustomKV(Field_Path,itemUrl);
            }
            crawlerRecords.add(turnRecord);
        }
    }

    /**
     * Returns the first substring of {@code input} matching the given regular
     * expression, or {@code null} when there is no match.
     *
     * @param regx  regular expression to search for
     * @param input text to search in
     * @return the first match, or {@code null} if none
     */
    private static String getString(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        // "if"-style check instead of the original while-loop that returned on
        // its first iteration — only the first match is ever used.
        return matcher.find() ? matcher.group(0) : null;
    }

    /**
     * Converts a Chinese relative or absolute time phrase to epoch milliseconds.
     * Supported forms: "刚刚" (just now), "N秒/分钟/小时/天/月/年前" (N units ago,
     * months/years approximated as 30/365 days), "今天 HH:mm", "昨天 HH:mm",
     * "MM/dd HH:mm" (year inferred), "yyyy-MM-dd HH:mm" and "yyyy/MM/dd HH:mm".
     *
     * @param word the time phrase to convert
     * @return epoch milliseconds, or {@code null} when the phrase is not recognized
     * @throws ParseException if an absolute date string can not be parsed
     * @author Zhao.Hhuan
     * @date 2020/9/4 13:28
     **/
    public static Long wordToDate(String word) throws ParseException {
        Long releaseTimeToLong = null;
        final String YESTODAY = "昨天";
        final String TODAY = "今天";
        final String CURRENT = "刚刚";
        LocalDate today = LocalDate.now();
        LocalDate time = null;
        long currentTimeMillis = System.currentTimeMillis();
        // Normalize measure words: "3个小时前" -> "3小时前".
        if (word.contains("个")) {
            word = word.replace("个", "");
        }
        if (CURRENT.equals(word)) {
            releaseTimeToLong = currentTimeMillis;
        } else if (word.contains("前")) {
            // Relative phrases: subtract N units from the current time.
            if (word.contains("秒")) {
                releaseTimeToLong = currentTimeMillis - Integer.parseInt(getString("\\d+", word)) * MILLIS_PER_SECOND;
            } else if (word.contains("分钟")) {
                releaseTimeToLong = currentTimeMillis - Integer.parseInt(getString("\\d+", word)) * MILLIS_PER_MINUTE;
            } else if (word.contains("小时")) {
                releaseTimeToLong = currentTimeMillis - Integer.parseInt(getString("\\d+", word)) * MILLIS_PER_HOUR;
            } else if (word.contains("天")) {
                releaseTimeToLong = currentTimeMillis - Integer.parseInt(getString("\\d+", word)) * MILLIS_PER_DAY;
            } else if (word.contains("月")) {
                // A month is approximated as 30 days.
                releaseTimeToLong = currentTimeMillis - Integer.parseInt(getString("\\d+", word)) * 30 * MILLIS_PER_DAY;
            } else if (word.contains("年")) {
                // A year is approximated as 365 days.
                releaseTimeToLong = currentTimeMillis - Integer.parseInt(getString("\\d+", word)) * 365 * MILLIS_PER_DAY;
            }
        } else if (word.contains(TODAY)) {
            // "今天 HH:mm" -> "yyyy-MM-dd HH:mm" using today's date.
            word = word.replaceAll(TODAY, "");
            word = String.valueOf(today) + " " + word.trim();
        } else if (word.contains(YESTODAY)) {
            // "昨天 HH:mm" -> "yyyy-MM-dd HH:mm" using yesterday's date.
            time = today.minus(1, ChronoUnit.DAYS);
            word = word.replaceAll(YESTODAY, "");
            word = String.valueOf(time) + " " + word.trim();
        }
        if (word.matches("\\d{2}/\\d{2} \\d{2}:\\d{2}")) {
            // "MM/dd HH:mm" carries no year: it belongs to this year when that
            // month/day has already occurred, otherwise to last year.
            int month = Integer.parseInt(word.substring(0, word.indexOf("/")));
            int day = Integer.parseInt(word.substring(word.indexOf("/") + 1, word.indexOf(" ")).trim());
            // Fix: the original required BOTH month <= current month AND
            // day <= current day, which mis-dated e.g. "02/15" on March 1st
            // (3 >= 2 but 1 < 15) into the previous year. The day comparison
            // only matters when the months are equal.
            boolean occurredThisYear = month < today.getMonthValue()
                    || (month == today.getMonthValue() && day <= today.getDayOfMonth());
            if (occurredThisYear) {
                word = today.getYear() + word;
            } else {
                word = (today.getYear() - 1) + word;
            }
            releaseTimeToLong = DateUtils.parseDate(word.trim(), "yyyyMM/dd HH:mm").getTime();
        } else if (word.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}")) {
            releaseTimeToLong = DateUtils.parseDate(word.trim(), "yyyy-MM-dd HH:mm").getTime();
        }else if(word.matches("\\d{4}/\\d+/\\d+ \\d{2}:\\d{2}")){
            releaseTimeToLong = DateUtils.parseDate(word.trim(), "yyyy/MM/dd HH:mm").getTime();
        }
        return releaseTimeToLong;
    }

    /**
     * Converts a count like "1.5w" (w = 万 = 10,000) to a plain number string:
     * "1.5w" -> "15000", "3w" -> "30000".
     * NOTE(review): inputs without a "w" suffix return "0" (original behavior,
     * preserved) — confirm callers only pass "w"-suffixed counts, otherwise
     * plain numbers such as "500" are silently dropped.
     *
     * @param num the raw count text
     * @return the expanded count as a number string
     * @author Zhao.Hhuan
     * @date 2020/9/4 14:50
     **/
    private String washNum(String num) {
        final String W = "w";
        int views = 0;
        if (num.contains(W)) {
            // Fix: the original stripped the '.' and multiplied by 1000, which
            // is only correct for exactly one decimal digit ("1.25w" became
            // 125000 instead of 12500). Parsing the decimal value and scaling
            // by 10000 handles any number of decimals, and still yields the
            // same results for the "3w" and "1.5w" shapes the original handled.
            views = (int) (Double.parseDouble(num.replace(W, "")) * 10000);
        }
        return String.valueOf(views);
    }

    /**
     * Ad-hoc manual check for {@link #wordToDate(String)}: converts a sample
     * "MM/dd HH:mm" phrase and prints the resulting epoch milliseconds.
     */
    public static void main(String[] args) {
        final String sample = "02/02 12:03";
        try {
            System.out.println(wordToDate(sample));
        } catch (ParseException e) {
            e.printStackTrace();
        }
    }

}
