package com.chance.cc.crawler.development.scripts.weibo.self;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpConstant;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.time.LocalDate;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.filter;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static org.apache.commons.lang3.time.DateUtils.MILLIS_PER_MINUTE;
import static org.apache.commons.lang3.time.DateUtils.MILLIS_PER_SECOND;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/4/29 13:30
 * @Description
 *     Weibo post crawling script: fetches post pages, their comments and
 *     comment replies, and performs the anonymous-visitor cookie handshake
 *     when the anti-robot "Sina Visitor System" page is served.
 **/
public class WeiboPostCrawlerScript extends CrawlerCommonScript {

    // final: the logger is created once per class and never reassigned
    private static final Logger log = LoggerFactory.getLogger(WeiboPostCrawlerScript.class);
    private static final String DOMAIN = "weibo";
    private static final String SITE = "post";
    // business tag that tracks how many times a record has been retried
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";

    // URL regular expressions recognised by this script
    private static final String ENTRANCE_URL = "https://s.weibo.com/\\?Refer=";
    private static final String POST_URL = "http[s]*://weibo.com/\\d+/[a-zA-Z0-9\\?\\_\\=]*";
    private static final String COMMENT_URL = "https://weibo.com/aj/v6/comment/big\\?ajwvr=6&id=\\S*&from=singleWeiBo";
    private static final String COMMENT_REPLY_URL = "https://weibo.com/aj/v6/comment/big\\?ajwvr=6&more_comment=\\S*&from=singleWeiBo";

    // endpoints of the visitor-cookie handshake (genvisitor -> incarnate)
    private static final String GET_TID_URL = "https://passport.weibo.com/visitor/genvisitor";
    private static final String GET_COOKIE_SOURCE_URL = "https://passport.weibo.com/visitor/visitor?a=incarnate&t=%s&w=2&c=095&gc=&cb=cross_domain&from=weibo&_rand=%s";
    private static final String GET_COOKIE_URL = "https://passport.weibo.com/visitor/visitor\\S*";

    /**
     * Domain this script handles.
     *
     * @return the crawler domain identifier ("weibo")
     */
    @Override
    public String domain() {
        // reuse the DOMAIN constant instead of duplicating the literal
        return DOMAIN;
    }

    /**
     * Registers every URL regular expression that routes requests into this
     * script.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {
                ENTRANCE_URL,
                POST_URL,
                COMMENT_URL,
                COMMENT_REPLY_URL,
                GET_TID_URL,
                GET_COOKIE_URL
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input gate: a request record is processed by this script only when its
     * business "site" tag equals {@code post}.
     *
     * @param crawlerRequestRecord incoming request record
     * @return true when the record belongs to this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator()
                .bizTags()
                .getCategoryTag()
                .getKVTagStrVal("site");
        return SITE.equals(site);
    }

    /**
     * Link-extraction entry point: retries failed downloads, detects the
     * visitor-system interstitial, and dispatches the page to the handler
     * matching its URL pattern.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded page
     * @return follow-up requests discovered while parsing
     */
//    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<CrawlerRequestRecord>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        if (!httpPage.isDownloadSuccess() || (httpPage.getStatusCode() != 200 && httpPage.getStatusCode() != 404)) {
            try {
                // back off for a minute before scheduling the retry
                Thread.sleep(60000);
            } catch (InterruptedException e) {
                // restore the interrupt flag so callers can observe the interruption
                Thread.currentThread().interrupt();
                log.error(e.getMessage());
            }
            // download failed: schedule the same URL for another attempt
            log.warn("{} status code : [{}],will retry!",requestUrl,httpPage.getStatusCode());
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        // anti-robot interstitial: start the visitor-cookie handshake instead
        String title = httpPage.getHtml().xpath("//title").get();
        if("Sina Visitor System".equals(title)){
            crawlerRequestRecord.setNeedWashPage(false);
            visiteSystemRecord(crawlerRequestRecord,httpPage,parsedLinks);
            return parsedLinks;
        }

        if(requestUrl.matches(GET_TID_URL)){
            getTidUrlRecord(crawlerRequestRecord,httpPage,parsedLinks);
        }

        if(requestUrl.matches(GET_COOKIE_URL)){
            getCookieUrlRecord(crawlerRequestRecord,httpPage,parsedLinks);
        }

        if(requestUrl.matches(POST_URL)){
            postUrlRecord(crawlerRequestRecord,httpPage,parsedLinks);
        }

        if(requestUrl.matches(COMMENT_URL)){
            commentUrlRecord(crawlerRequestRecord,httpPage,parsedLinks);
        }

        if(requestUrl.matches(COMMENT_REPLY_URL)){
            commentReplyUrlRecord(crawlerRequestRecord,httpPage,parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Reacts to the "Sina Visitor System" anti-robot page: builds a POST to the
     * genvisitor endpoint to start the visitor-cookie handshake, and stores the
     * originally requested URL in the "resourceUrl" business tag so it can be
     * re-fetched once cookies are obtained.
     */
    private void visiteSystemRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks){
        String url = httpPage.getRequest().getUrl();
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(GET_TID_URL)
                .recordKey(GET_TID_URL + url)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .build();
        HttpRequest httpRequest = record.getHttpRequest();
        httpRequest.setMethod(HttpConstant.Method.POST);
        // form fields expected by genvisitor: callback name and a fixed browser fingerprint
        Map<String,Object> params = new HashMap<>();
        params.put("cb","gen_callback");
        params.put("fp","{\"os\":\"1\",\"browser\":\"Chrome89,0,4389,114\",\"fonts\":\"undefined\",\"screenInfo\":\"1536*864*24\",\"plugins\":\"Portable Document Format::internal-pdf-viewer::Chrome PDF Plugin|::mhjfbmdgcfjbbpaeojofohoefgiehjai::Chrome PDF Viewer|::internal-nacl-plugin::Native Client\"}");
        httpRequest.setRequestBody(HttpRequestBody.form(params,"UTF-8"));
        httpRequest.addHeader("Content-Type","application/x-www-form-urlencoded");
        // remember where we originally wanted to go
        record.tagsCreator().bizTags().addCustomKV("resourceUrl",url);
        record.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(record);
    }

    /**
     * Parses the visitor tid out of the genvisitor JSONP response and schedules
     * the follow-up "incarnate" request that exchanges the tid for cookies.
     */
    private void getTidUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks){
        String resourceUrl = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("resourceUrl");
        // response is JSONP: window.gen_callback && gen_callback({...})
        String tid = httpPage.getJson().removePadding("window.gen_callback && gen_callback").jsonPath($_type + ".data.tid").get();
        try {
            String url = String.format(GET_COOKIE_SOURCE_URL,URLEncoder.encode(tid,"UTF-8"),Math.random());
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(url)
                    .recordKey(url + resourceUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            record.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
            parsedLinks.add(record);
        } catch (UnsupportedEncodingException e) {
            // keep the stack trace instead of discarding it with getMessage() only
            log.error(e.getMessage(), e);
        }
    }

    /**
     * Parses the cookie payload from the visitor "incarnate" JSONP response,
     * assembles a Cookie header value, and re-issues the originally requested
     * URL (stored in the "resourceUrl" tag) with those cookies attached.
     */
    private void getCookieUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks){
        String resourceUrl = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("resourceUrl");
        // response is JSONP: window.cross_domain && cross_domain({...})
        String s = httpPage.getJson().removePadding("window.cross_domain && cross_domain").jsonPath($_type + ".data").get();
        // typed map instead of a raw Map
        Map<String, Object> map = JSONObject.parseObject(s);
        // build "k1=v1;k2=v2" without a trailing separator; unlike the previous
        // substring(0, length - 1), this is safe for an empty payload
        StringBuilder cookieBuilder = new StringBuilder();
        for (Map.Entry<String, Object> entry : map.entrySet()) {
            if (cookieBuilder.length() > 0) {
                cookieBuilder.append(';');
            }
            cookieBuilder.append(entry.getKey()).append('=').append(entry.getValue());
        }
        String cookies = cookieBuilder.toString();

        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(resourceUrl)
                .releaseTime(System.currentTimeMillis())
                .httpHead("cookie",cookies)
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .build();
        record.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(record);
    }

    /**
     * Parses a post detail page: verifies the page exists, extracts the script
     * fragment whose JSON carries the rendered HTML, swaps that HTML into the
     * page for later washing, and — when the comment data type is requested —
     * schedules the first comment-list request.
     */
    private void postUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks){
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        String rawText = httpPage.getRawText();
        // the page may not exist (deleted post / 404 redirect)
        String title = httpPage.getHtml().xpath("//title/text()").get();
        if (rawText.contains("parent.window.location=\"https://weibo.com/sorry?pagenotfound\"") || "404错误".equals(title)) {
            log.error("this Page(“" + requestUrl + "”) does not exist!");
            crawlerRequestRecord.setNeedWashPage(false);
            return ;
        }

        // page exists: the post body lives in the <script> block containing "follow_recommend_box"
        String[] split = rawText.split("<script>");
        String mainText = "";
        for (String data : split) {
            if (data.contains("follow_recommend_box")) {
                mainText = data;
                break;
            }
        }
        if (StringUtils.isEmpty(mainText)) {
            // fragment missing: treat as a broken download and retry
            log.error("mainText(" + requestUrl + ") is null!will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        // the script invokes a function with a JSON argument; take the text
        // between "({" and ")" and read the rendered markup from its "html" field
        Json json = new Json(mainText.substring(mainText.lastIndexOf("({") + 1, mainText.lastIndexOf(")")));
        Html html = new Html(json.jsonPath($_type + ".html").get());
        httpPage.setHtml(html);

        // comments: schedule the first comment-list request when requested
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (resultTags.hasDataType(comment)) {
            resultTags.getCategoryTag().removeLabelTag("comment");

            CrawlerRequestRecord filterRecord = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class);
            if (filterRecord == null) {
                log.error("filter record can not null !");
                return;
            }

            String uri = html.xpath("//a[@node-type=\"feed_list_commentTabAll\"]/@action-data").get();
            if (StringUtils.isEmpty(uri)) {
                log.error("can not get articleCommentUrl,requestUrl is :" + requestUrl);
                return;
            }

            // action-data is an HTML-escaped query fragment for the comment endpoint
            String articleCommentUrl = "https://weibo.com/aj/v6/comment/big?ajwvr=6&" + StringEscapeUtils.unescapeHtml(uri);
            if (!uri.contains("from=singleWeiBo")) {
                articleCommentUrl = articleCommentUrl + "&from=singleWeiBo";
            }
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(articleCommentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .needWashed(true)
                    .resultLabelTag(comment)
                    .build();
            // remember the parent article so comments can be linked back to it
            commentRecord.getHttpRequest().addExtra("articleUrl", requestUrl);
            commentRecord.setFilter(filterRecord.getFilter());
            commentRecord.setFilterInfos(filterRecord.getFilterInfos());
            parsedLinks.add(commentRecord);
        }
    }


    /**
     * Parses one page of article comments: extracts the embedded HTML from the
     * JSON response, schedules the next comment page, and schedules a request
     * for every "more replies" (reply-to-reply) list found on the page.
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks){
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        String htmlJson = "";
        try {
            htmlJson = httpPage.getJson().jsonPath($_type + ".data.html").get();
        } catch (Exception e) {
            // response is not the expected JSON: treat as a failed download and retry
            log.error("articleComment page(“" + requestUrl + "”) download fail！will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return ;
        }
        Html html = new Html(htmlJson);
        httpPage.setHtml(html);

        // comment paging: the next-page link carries its target in action-data
        String nextArticleCommentUrl = httpPage.getHtml().xpath("//a[@action-type=\"click_more_comment\"]/@action-data|//div[@node-type=\"comment_loading\"]/@action-data").get();
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@comment_id]").nodes();
        if (nodes.size() < 1) {
            // no comment nodes on this page: either move on to the next page
            // (when one is advertised) or assume a broken download and retry
            crawlerRequestRecord.setNeedWashPage(false);
            if (StringUtils.isNotBlank(nextArticleCommentUrl)) {
                log.error("articleComment page(" + requestUrl + ") comment num is 0 ！will go to next page");
                CrawlerRequestRecord nextCommentUrlRecord = getNextCommentUrlRecord(crawlerRequestRecord, httpPage, nextArticleCommentUrl);
                nextCommentUrlRecord.tagsCreator().requestTags().removeRequestType(filter);
                crawlerRequestRecord.setNeedWashPage(false);
                parsedLinks.add(nextCommentUrlRecord);
                return;
            } else {
                log.error("articleComment page(" + requestUrl + ") comment num is 0 ！will retry");
                requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
                crawlerRequestRecord.setNeedWashPage(false);
                return ;
            }
        }

        if(StringUtils.isBlank(nextArticleCommentUrl)){
            log.error("articleComment next page url can not get!this page is [{}]",requestUrl);
        }else{
            parsedLinks.add(getNextCommentUrlRecord(crawlerRequestRecord, httpPage, nextArticleCommentUrl));
        }

        // collect the action-data of every reply-to-reply ("more_comment") link
        List<String> all = httpPage.getHtml().xpath("//div[@node-type=\"comment_list\"]/div//a[starts-with(@action-data,'more_comment=big')]/@action-data").all();
        for (String uri : all) {
            if(StringUtils.isNotBlank(uri)){
                String url = "https://weibo.com/aj/v6/comment/big?ajwvr=6&" + StringEscapeUtils.unescapeHtml(uri) + "&from=singleWeiBo";
                CrawlerRequestRecord commentCommentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .needWashed(true)
                        .copyResultTags()
                        .build();

                commentCommentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
                parsedLinks.add(commentCommentRecord);
            }
        }
    }

    /**
     * Builds the request for the next page of article comments.
     * (A previous version also parsed a "sum_comment_number" query parameter to
     * stop paging at the total count; that check and its input were dead code
     * and have been removed.)
     *
     * @param httpPage              downloaded page (kept for signature compatibility; currently unused)
     * @param nextArticleCommentUrl raw HTML-escaped action-data fragment from the page
     * @return record pointing at the next comment page
     */
    private CrawlerRequestRecord getNextCommentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,String nextArticleCommentUrl){
        // unescape the action-data and wrap it in the comment endpoint
        nextArticleCommentUrl = "https://weibo.com/aj/v6/comment/big?ajwvr=6&" + StringEscapeUtils.unescapeHtml(nextArticleCommentUrl) + "&from=singleWeiBo";
        CrawlerRequestRecord nextArticleCommentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextArticleCommentUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();
        nextArticleCommentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        return nextArticleCommentRecord;
    }

    /**
     * Parses one page of replies-to-replies: extracts the embedded HTML from
     * the JSON response and, when a further page is advertised, schedules it.
     */
    private void commentReplyUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks){
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        String htmlJson = "";
        try {
            htmlJson = httpPage.getJson().jsonPath($_type + ".data.html").get();
        } catch (Exception e) {
            // response is not the expected JSON: treat as a failed download and retry
            log.error("commentComment page down load is fail!will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        Html html = new Html(htmlJson);
        httpPage.setHtml(html);
        // since this URL could only be generated from an existing "more replies"
        // link, replies must exist; a count of 0 therefore means a broken download
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@comment_id]").nodes();
        if (nodes.size() < 1) {
            log.error("commentComment page(" + requestUrl + ") comment num is 0 !will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        // paging of replies-to-replies: absent action-data means this is the last page
        String commentCommentTurnUrl = httpPage.getHtml().xpath("//a[@action-type=\"click_more_child_comment_big\"]/@action-data").get();
        if (StringUtils.isBlank(commentCommentTurnUrl)) {
            return ;
        }

        commentCommentTurnUrl = "https://weibo.com/aj/v6/comment/big?ajwvr=6&" + StringEscapeUtils.unescapeHtml(commentCommentTurnUrl) + "&from=singleWeiBo";
        CrawlerRequestRecord commentCommentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(commentCommentTurnUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();

        commentCommentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(commentCommentRecord);
    }
    /**
     * Washes a downloaded page into structured crawler data, emitting one entry
     * per result data type carried by the record's result tags.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> results = new ArrayList<>();
        CrawlerResultTags tags = crawlerRecord.tagsCreator().resultTags();

        if (tags.hasDataType(article)) {
            results.add(washArticle(crawlerRecord, page));
        }
        if (tags.hasDataType(interaction)) {
            results.add(washInteraction(crawlerRecord, page));
        }
        if (tags.hasDataType(comment)) {
            results.addAll(washComment(crawlerRecord, page));
        }
        return results;
    }

    /**
     * Extracts the post itself (author, content, release time) from a post page.
     *
     * @return the article data, or null when no release time could be found or
     *         the time string could not be parsed
     */
    public CrawlerData washArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // article key = last path segment of the URL, query string stripped
        String articleKey = itemUrl.contains("?") ? itemUrl.substring(itemUrl.lastIndexOf("/") + 1,itemUrl.lastIndexOf("?")) : itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        String author = httpPage.getHtml().xpath("//a[@class=\"W_f14 W_fb S_txt1\"]/text()").get();

        List<String> textList = httpPage.getHtml().xpath("//div[@class=\"WB_text W_f14\"]//text()").all();
        // StringBuilder: local accumulation needs no synchronization
        StringBuilder content = new StringBuilder();
        for (String text : textList) {
            if (StringUtils.isEmpty(text)) {
                continue;
            }
            content.append(text).append(" ");
        }

        String releaseTime = httpPage.getHtml().xpath("//div[@node-type=\"follow_recommend_box\"]/preceding-sibling::div/a[@node-type=\"feed_list_item_date\"]/text()").get();
        if (StringUtils.isEmpty(releaseTime)) {
            return null;
        }

        // named articleData (not "article") so the statically imported enum
        // constant is not shadowed and valueOf("article") is unnecessary
        CrawlerData articleData = null;
        try {
            articleData = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), article.enumVal(), articleKey))
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(articleReleaseTime(releaseTime.trim()))
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Content, content.toString().trim())
                    .build();
        } catch (ParseException e) {
            // keep the stack trace so the offending time string can be traced
            log.error(e.getMessage(), e);
        }
        return articleData;

    }

    /**
     * Extracts the interaction counters (forwards, comments, likes) from a post
     * page.
     *
     * @return interaction data, or null when no release time could be found or
     *         the time string could not be parsed
     */
    public CrawlerData washInteraction(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // article key = last path segment of the URL, query string stripped
        String articleKey = itemUrl.contains("?") ? itemUrl.substring(itemUrl.lastIndexOf("/") + 1,itemUrl.lastIndexOf("?")) : itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        // each counter shows its label ("转发"/"评论"/"赞") when the count is zero
        String forwards = httpPage.getHtml().xpath("//span[@node-type=\"forward_btn_text\"]//em[@class=\"W_ficon ficon_forward S_ficon\"]/following-sibling::em").get();
        forwards = "转发".equals(forwards) ? "0" : washNum(forwards);
        String comments = httpPage.getHtml().xpath("//span[@node-type=\"comment_btn_text\"]//em[@class=\"W_ficon ficon_repeat S_ficon\"]/following-sibling::em").get();
        comments = "评论".equals(comments) ? "0" : washNum(comments);
        String likes = httpPage.getHtml().xpath("//ul[@class=\"WB_row_line WB_row_r4 clearfix S_line2\"]//span[@node-type=\"like_status\"]/em[@class=\"W_ficon ficon_praised S_txt2\"]/following-sibling::em").get();
        likes = "赞".equals(likes) ? "0" : washNum(likes);

        String releaseTime = httpPage.getHtml().xpath("//div[@node-type=\"follow_recommend_box\"]/preceding-sibling::div/a[@node-type=\"feed_list_item_date\"]/text()").get();
        if (StringUtils.isEmpty(releaseTime)) {
            return null;
        }
        // named interactionData (not "interaction") so the statically imported
        // enum constant is not shadowed and valueOf("interaction") is unnecessary
        CrawlerData interactionData = null;
        try {
            interactionData = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), interaction.enumVal(), articleKey))
                    .resultLabelTag(interaction)
                    .url(itemUrl)
                    .releaseTime(articleReleaseTime(releaseTime.trim()))
                    .addContentKV(Field_I_Forwards, forwards)
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Comments, comments)
                    .build();
        } catch (ParseException e) {
            // keep the stack trace so the offending time string can be traced
            log.error(e.getMessage(), e);
        }
        return interactionData;
    }

    /**
     * Extracts every comment node (author, content, release time, like and
     * reply counts) from a comment-list page.
     *
     * @param requestRecord record that produced this page; its "articleUrl"
     *                      extra links comments back to the parent article
     * @param httpPage      downloaded comment page with the HTML already set
     * @return one CrawlerData per comment found on the page
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        List<CrawlerData> commentList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        // article key = last path segment of the article URL, query string stripped
        String articleKey = articleUrl.contains("?") ? articleUrl.substring(articleUrl.lastIndexOf("/") + 1,articleUrl.lastIndexOf("?")) :articleUrl.substring(articleUrl.lastIndexOf("/") + 1);

        // hoisted out of the loop: compiling a Pattern per node is wasted work
        Pattern digitPattern = Pattern.compile("\\d+");

        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@comment_id]").nodes();
        for (Selectable node : nodes) {
            String author = node.xpath("./div/div[@class=\"WB_text\"]//a[contains(@usercard,'id')]/text()").get();
            List<String> textAll = node.xpath("./div/div[@class=\"WB_text\"]/text()|./div/div[@class=\"WB_text\"]/a[@extra-data]//text()").all();
            // StringBuilder: local accumulation needs no synchronization
            StringBuilder content = new StringBuilder();
            for (String text : textAll) {
                if (StringUtils.isBlank(text)) {
                    continue;
                }
                content.append(text).append(" ");
            }

            String releaseTime = node.xpath("./div/div[@class=\"WB_func clearfix\"]/div[@class=\"WB_from S_txt2\"]/text()").get();
            if (StringUtils.isEmpty(releaseTime)) {
                // a node without a timestamp is not a real comment entry
                continue;
            }

            String commentId = node.xpath("./@comment_id").get();

            String likes = node.xpath("./div/div[@class=\"WB_func clearfix\"]//em[@class=\"W_ficon ficon_praised S_txt2\"]/following-sibling::em").get();
            likes = StringUtils.isEmpty(likes) || "赞".equals(likes) ? "0" : likes;
            // reply count: either the "more replies" link text or, failing that,
            // the number of inline child-comment nodes
            String comments = node.xpath("./div/div[@class=\"list_box_in S_bg3\"]//div[@class=\"WB_text\"]/a[starts-with(@action-data,'more_comment=big')]").get();
            if (StringUtils.isEmpty(comments)) {
                List<Selectable> childNodes = node.xpath(".//div[@node-type=\"child_comment\"]/div[@comment_id]").nodes();
                comments = childNodes != null ? String.valueOf(childNodes.size()) : "0";
            } else {
                // pull the first run of digits out of the link text
                Matcher matcher = digitPattern.matcher(comments);
                if (matcher.find()) {
                    comments = matcher.group(0);
                }
            }

            try {
                // named commentData (not "comment") so the statically imported
                // enum constant is not shadowed
                CrawlerData commentData = CrawlerData.builder()
                        .data(requestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), comment.enumVal(), commentId))
                        .resultLabelTag(comment)
                        .url(itemUrl)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .releaseTime(articleReleaseTime(releaseTime.trim()))
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Content, content.toString().trim())
                        .addContentKV(Field_I_Likes, likes)
                        .addContentKV(Field_I_Comments, comments)
                        .build();
                commentList.add(commentData);
            } catch (ParseException e) {
                // keep the stack trace so the offending time string can be traced
                log.error(e.getMessage(), e);
            }
        }
        return commentList;
    }

    /**
     * Post-execution hook required by the base script; intentionally a no-op
     * for this script.
     *
     * @param crawlerRecordContext context of the finished crawl record (unused)
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Schedules a failed download for another attempt, up to 5 retries.
     * The attempt count lives in the {@code REQUEST_AGAIN_TAG} business tag;
     * the retry copies method, body, headers, extras, filter and wash/parse
     * flags from the original record.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 5) {
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {},detail is [{}]", crawlerRecord.getHttpRequest().getUrl(),JSONObject.toJSONString(crawlerRecord));
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // turn-page records are re-issued as turn-page requests, item records as item requests
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord = null;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        if (crawlerRequestRecord == null) {
            // defensive: the builder is not expected to yield null, but bail
            // out rather than NPE below
            return;
        }

        // a POST retry must carry the original method and request body
        HttpRequest httpRequest = crawlerRecord.getHttpRequest();
        String method = httpRequest.getMethod();
        // locale-safe comparison instead of method.toUpperCase().equals(...)
        if (StringUtils.isNotBlank(method) && HttpConstant.Method.POST.equalsIgnoreCase(method)) {
            HttpRequest retryRequest = crawlerRequestRecord.getHttpRequest();
            retryRequest.setMethod(HttpConstant.Method.POST);
            retryRequest.setRequestBody(httpRequest.getRequestBody());
        }

        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.setFilter(crawlerRecord.getFilter());
        // record the incremented attempt count on the retry
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Normalizes a Weibo counter string to a plain number string.
     * "100万+" -> "1000000", "123" -> "123", blank -> "0".
     *
     * Bug fixes over the previous version:
     * - values >= 10^7 (e.g. "1000万+") were corrupted because
     *   String.valueOf(double) switches to scientific notation ("1.0E7") and the
     *   substring-before-dot logic then produced "1"; long arithmetic avoids that.
     * - "N万" without a trailing "+" was previously returned unconverted.
     *
     * @param num raw counter text from the page; may be null or blank
     * @return numeric string, "0" for blank input
     */
    private static String washNum(String num){
        // blank (null, empty, or whitespace-only) counts as zero
        if (num == null || num.trim().isEmpty()) {
            return "0";
        }

        String result = num.replace("+", "");
        if (result.contains("万")) {
            // "万" = 10,000; use long arithmetic so large values never render
            // in scientific notation
            long value = (long) (Double.parseDouble(result.replace("万", "")) * 10000);
            return String.valueOf(value);
        }
        return result;
    }

    /**
     * Converts the many Weibo time formats into epoch milliseconds.
     * Handles absolute dates ("yyyy年MM月dd日 HH:mm", "yyyy-MM-dd HH:mm"),
     * relative times ("刚刚" = just now, "N秒前" = N seconds ago, "N分钟前" =
     * N minutes ago), "今天"/"昨天" (today/yesterday) prefixes, and forum-style
     * "第N楼" (floor) markers.
     *
     * @param timeStr raw time text from the page; may be blank
     * @return epoch millis; 0 for blank input, 1 when no known format matches
     * @throws ParseException when a recognised pattern fails to parse
     */
    private static Long articleReleaseTime(String timeStr) throws ParseException {
        if (StringUtils.isBlank(timeStr)) {
            return 0L; // literal instead of the deprecated new Long(String)
        }

        LocalDate now = LocalDate.now(); // today (yyyy-MM-dd)
        timeStr = timeStr.trim();
        // e.g. "2021年第3楼 4月5日 12:30" -> year + "4月5日 12:30"
        if (timeStr.matches("\\d+年第\\d+楼 \\d+月\\d+日 \\d{2}:\\d{2}")) {
            String[] split = timeStr.split("楼");
            String time = split[0].split("年")[0] + split[1].trim();
            return DateUtils.parseDate(time, "yyyyMM月dd日 HH:mm").getTime();
        }

        if (timeStr.contains("日") && timeStr.contains("月")) {
            // prepend the current year when missing; drop any "第N楼" prefix
            String time = timeStr.contains("年") ? timeStr : (timeStr.contains("楼") ? now.getYear() + "年" + timeStr.split("楼")[1].trim() : now.getYear() + "年" + timeStr);
            return DateUtils.parseDate(time, "yyyy年MM月dd日 HH:mm").getTime();
        }

        if (timeStr.startsWith("今天")) {
            // "今天HH:mm" -> "yyyy-MM-ddHH:mm"
            String time = timeStr.replace("今天", now.toString());
            return DateUtils.parseDate(time, "yyyy-MM-ddHH:mm").getTime();
        }

        if (timeStr.startsWith("昨天")) {
            String yesterday = now.minus(1, ChronoUnit.DAYS).toString();
            String time = timeStr.replace("昨天", yesterday);
            return DateUtils.parseDate(time, "yyyy-MM-ddHH:mm").getTime();
        }

        if (timeStr.endsWith("刚刚")) {
            return System.currentTimeMillis();
        }
        // relative times, e.g. "7秒前" (7 seconds ago), "59分钟前" (59 minutes ago)
        if (timeStr.endsWith("秒前")) {
            // parseInt: the primitive is wanted, no need for the boxed valueOf
            int number = Integer.parseInt(timeStr.substring(0, timeStr.indexOf("秒前")));
            return (System.currentTimeMillis() - number * MILLIS_PER_SECOND);
        }

        if (timeStr.endsWith("分钟前")) {
            int number = Integer.parseInt(timeStr.substring(0, timeStr.indexOf("分钟前")));
            return (System.currentTimeMillis() - number * MILLIS_PER_MINUTE);
        }

        if (timeStr.matches("\\d{4}-\\d+-\\d+ \\d+:\\d+")) {
            return DateUtils.parseDate(timeStr, "yyyy-MM-dd HH:mm").getTime();
        }

        if (timeStr.contains("楼")) {
            timeStr = timeStr.replaceAll("第\\d+楼", "");
            return DateUtils.parseDate(timeStr.trim(), "yyyy-MM-dd HH:mm").getTime();
        }

        // unknown format: sentinel value 1, kept for backward compatibility
        return 1L;
    }

    /**
     * Ad-hoc manual check for {@code washNum}; prints the normalized counter.
     */
    public static void main(String[] args) {
        System.out.println(washNum("100万+"));
    }
}
