package com.chance.cc.crawler.development.scripts.bilibili;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpConstant;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.*;

/**
 * @author lt
 * @version 1.0
 * @date 2021-02-01 18:53:53
 * @email okprog@sina.com
 */
public class BilibiliCrawlerScript extends CrawlerCommonScript {

    // URL patterns used to route downloaded pages to the matching parse/wash logic.
    private static final String keysRegex = "https?://\\S*v1/meta/bilibili/keys\\S*";
    public static final String indexRegex = "https://www\\.bilibili\\.com/";
    public static final String listUrlRegex = "https://search\\.bilibili\\.com/all\\?keyword=\\S*";
    public static final String articleUrlRegex = "https?://www\\.bilibili\\.com/video/\\S*";
    public static final String commentUrlRegex = "https://api\\.bilibili\\.com/x/v2/reply\\?\\S*";
    // Dots escaped (like the sibling patterns) so '.' matches a literal dot, not any char.
    public static final String commentReplyUrlRegex = "https://api\\.bilibili\\.com/x/v2/reply/reply\\S*";

    // URL templates: search-result page, top-level comment page, reply-of-reply page.
    public static final String searchUrlFormat = "https://search.bilibili.com/all?keyword=%s&from_source=nav_suggest_new&order=pubdate&duration=0&tids_1=0&page=%s";
    public static final String commentUrlFormat = "https://api.bilibili.com/x/v2/reply?pn=%s&type=1&oid=%s&sort=0";
    public static final String commentReplyUrlFormat = "https://api.bilibili.com/x/v2/reply/reply?jsonp=jsonp&pn=%s&type=1&oid=%s&ps=10&root=%s";

    // final: created once per class and never reassigned.
    private static final Logger logger = LoggerFactory.getLogger(BilibiliCrawlerScript.class);

    /**
     * Expands a keyword-source record into one search request per keyword.
     * <p>
     * The first support record is expected to carry the keyword-service response
     * (its URL matches {@code keysRegex}); for every keyword in its "content"
     * array, a request for the first search-result page is generated. Falls back
     * to the superclass default when no keyword requests could be built.
     *
     * @param requestRecord        the original request being prepared
     * @param supportSourceRecords auxiliary records; index 0 carries the keyword list
     * @return one request per keyword, or the superclass default when none were built
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> allItemRecords = new ArrayList<>();

        // Without a keyword source there is nothing to expand; defer to the default behavior.
        if (supportSourceRecords == null || supportSourceRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        CrawlerRequestRecord keywordRecord = supportSourceRecords.get(0);
        String keywordUrl = keywordRecord.getHttpRequest().getUrl();
        if (keywordUrl.matches(keysRegex)) {
            try {
                JSONObject jsonObject = JSONObject.parseObject(keywordRecord.getInternalDownloadPage().getRawText());
                // status == 0 is the keyword service's success flag.
                if (jsonObject.getIntValue("status") == 0) {
                    JSONArray contents = jsonObject.getJSONArray("content");
                    for (Object content : contents) {
                        String keyword = ((JSONObject) content).getString("keyword");
                        // StandardCharsets.UTF_8.name() cannot misspell the charset, unlike a raw "utf-8" literal.
                        String searchUrl = String.format(searchUrlFormat, URLEncoder.encode(keyword, StandardCharsets.UTF_8.name()), 1);
                        CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                                .turnPageRequest(requestRecord)
                                .httpUrl(searchUrl)
                                .recordKey(searchUrl)
                                .releaseTime(System.currentTimeMillis())
                                .needWashed(false)
                                .needParsed(true)
                                .notFilterRecord()
                                .copyBizTags()
                                .copyResultTags()
                                .build();
                        crawlerRequestRecord.getHttpRequest().setMethod(HttpConstant.Method.GET);
                        // Tag the plain-text keyword so downstream stages know what was searched.
                        crawlerRequestRecord.tagsCreator().bizTags().addKeywords(keyword);
                        allItemRecords.add(crawlerRequestRecord);
                    }
                }
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
        if (allItemRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        return allItemRecords;
    }

    /**
     * Routes a downloaded page to the matching link parser, re-queuing failed
     * downloads up to 10 times before giving up.
     *
     * @param crawlerRequestRecord the record whose page was just downloaded
     * @param httpPage             the downloaded page
     * @return follow-up requests; null for URLs this script does not recognize
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        int statusCode = httpPage.getStatusCode();
        String lastRequestUrl = lastRequest.getUrl();
        if (StringUtils.isBlank(httpPage.getRawText()) || !httpPage.isDownloadSuccess() || statusCode != 200) {
            Map<String, Object> extras = lastRequest.getExtras();
            int downloadTimes = 1;
            if (null == extras) {
                extras = new HashMap<>();
                extras.put("downloadTimes", downloadTimes);
                // Persist the fresh map on the request so the retry count survives re-queuing;
                // previously the new map was discarded and the counter never advanced.
                lastRequest.setExtras(extras);
            } else {
                try {
                    // String.valueOf tolerates both Integer and String values. The previous
                    // (String) cast always threw ClassCastException for the Integer values
                    // stored above, resetting the counter and allowing unlimited retries.
                    downloadTimes = Integer.parseInt(String.valueOf(extras.get("downloadTimes")));
                    extras.put("downloadTimes", downloadTimes + 1);
                } catch (Exception e) {
                    extras.put("downloadTimes", downloadTimes);
                }
            }
            // (A && B) || C: give up on a known anti-crawl page, or after too many attempts.
            if (null != httpPage.getRawText() && httpPage.getRawText().contains("您访问的口碑存在异常") || downloadTimes > 10) {
                logger.error("页面不存在：" + statusCode);
                return parsedLinks;
            }

            // Re-queue the same request for another download attempt.
            parsedLinks.add(crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            return parsedLinks;
        }
        if (lastRequestUrl.matches(listUrlRegex)) {
            return parseListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(articleUrlRegex)) {
            return parseArticleLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(commentUrlRegex)) {
            return parseCommentLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(commentReplyUrlRegex)) {
            return commentReplyUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        // Unrecognized URL; null is the historical contract here.
        return null;
    }

    /**
     * Handles a downloaded reply-of-reply page and enqueues the previous page.
     * <p>
     * Reply threads are walked backwards: parseCommentLinks enqueues the LAST page
     * of a thread, and this method rebuilds the URL with pn decremented until pn
     * reaches 1.
     *
     * @param crawlerRequestRecord the record whose page was just downloaded
     * @param httpPage             the downloaded reply-of-reply JSON page
     * @param parsedLinks          accumulator for follow-up requests
     * @return parsedLinks (optionally with one previous-page request added), or
     *         null once pn drops below 2
     */
    private List<CrawlerRequestRecord> commentReplyUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String page = httpPage.getJson().jsonPath($_type + ".data.page").get();
        if (StringUtils.isNotBlank(page)) {
            JSONObject jsonObject = JSONObject.parseObject(page);
            int count = jsonObject.getIntValue("count");
            int num = jsonObject.getIntValue("num");
            int size = jsonObject.getIntValue("size");
            // NOTE(review): stops paging once size * num covers count — presumably "this page
            // already spans the remaining replies"; confirm against the backwards walk order.
            if (size * num >= count) {
                return parsedLinks;
            }

            // Turn the page: rebuild the URL with pn decremented by one, all other params kept.
            String requestUrl = httpPage.getRequest().getUrl();
            String[] split = requestUrl.split("\\?");
            String nextUrl = split[0] + "?";
            List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
            for (NameValuePair nameValuePair : parse) {
                String name = nameValuePair.getName();
                String value = nameValuePair.getValue();
                if ("pn".equals(name)) {
                    // pn == 1 was the final page of the backwards walk; nothing left to fetch.
                    if(Integer.parseInt(value) < 2){
                        return null;
                    }
                    nextUrl = nextUrl + name + "=" + (Integer.parseInt(value) - 1) + "&";
                } else {
                    nextUrl = nextUrl + name + "=" + value + "&";
                }
            }
            // Drop the trailing '&'.
            nextUrl = nextUrl.substring(0, nextUrl.length() - 1);

            CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .needWashed(true)
                    .copyResultTags()
                    .build();
            // Carry the extras (e.g. articleUrl) forward so washing can find the parent article.
            requestRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
            parsedLinks.add(requestRecord);
        }
        return parsedLinks;
    }

    /**
     * Parses a top-level comment JSON page: enqueues the next comment page and,
     * for every reply that has sub-replies, the last page of its reply thread
     * (reply threads are then walked backwards by commentReplyUrlRecord).
     *
     * @return parsedLinks with the follow-up requests appended
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        Map<String, Object> params = getUrlParams(requestUrl);
        if (params == null || params.isEmpty()) {
            return parsedLinks;
        }
        String oid = (String) params.get("oid");
        crawlerRequestRecord.setNeedWashPage(true);
        int currentPage = Integer.parseInt((String) params.get("pn"));
        JSONArray replies = JSONObject.parseObject(httpPage.getRawText())
                .getJSONObject("data")
                .getJSONArray("replies");
        if (replies == null || replies.isEmpty()) {
            return parsedLinks;
        }

        // Next page of top-level comments.
        String commentUrl = String.format(commentUrlFormat, (currentPage + 1), oid);
        CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .recordKey(commentUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .resultLabelTag(comment)
                .resultLabelTag(interaction)
                .copyBizTags()
                .build();
        nextPageRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(nextPageRecord);

        // One request per reply thread, starting at its last page (10 replies per page).
        for (Object reply : replies) {
            JSONObject replyObj = (JSONObject) reply;
            String rcount = replyObj.getString("rcount");
            if (StringUtils.isBlank(rcount) || Integer.parseInt(rcount) <= 0) {
                continue;
            }
            int replyCount = Integer.parseInt(rcount);
            int lastPage = (replyCount + 9) / 10; // ceil(replyCount / 10)
            String url = String.format(commentReplyUrlFormat, lastPage, replyObj.getString("oid"), replyObj.getString("rpid_str"));
            CrawlerRequestRecord replyRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(url)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .needWashed(true)
                    .resultLabelTag(comment)
                    .build();
            replyRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
            parsedLinks.add(replyRecord);
        }
        return parsedLinks;
    }

    /**
     * Parses a video detail page.
     * <p>
     * If the inline author JSON ("upData") is missing, the page is treated as
     * incomplete and re-queued. When the record is tagged for comments and the
     * video has replies, the first top-level comment page is enqueued, carrying
     * the filter info stored in the "comment_record_filter_info" biz tag.
     *
     * @return follow-up requests (possibly the re-queued page itself)
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        crawlerRequestRecord.setNeedWashPage(true);
        // Remember the article URL so comment data can reference its parent later.
        crawlerRequestRecord.getHttpRequest().addExtra("articleUrl", crawlerRequestRecord.getHttpRequest().getUrl());
        // Author info is embedded in the page's inline JSON between "upData": and ,"level_info.
        Matcher mtAInfo = Pattern.compile("\"upData\":(.*),\"level_info").matcher(httpPage.getRawText());
        String aInfo = "";
        while (mtAInfo.find()) {
            aInfo = mtAInfo.group(0).split("upData\":")[1].split(",\"level_info")[0] + "}";
        }
        JSONObject authorObj = JSONObject.parseObject(aInfo);
        if (authorObj == null) {
            // No author JSON found: likely a partial/anti-crawl page, so retry instead of washing.
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            crawlerRequestRecord.setNeedWashPage(false);
            parsedLinks.add(crawlerRequestRecord);
            return parsedLinks;
        }
        // Decide whether comments should also be crawled for this video.
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag();
        if (categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null) {
            crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().removeLabelTag(comment.enumVal());
            if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
                logger.error("bilibili crawler comment need to filter information!");
                return parsedLinks;
            }
            // The reply count lives in the inline "stat" JSON between "stat": and ,"dynamic".
            Matcher mtInfo = Pattern.compile("\"stat\":(.*),\"dynamic\"").matcher(httpPage.getRawText());
            String info = "";
            while (mtInfo.find()) {
                info = mtInfo.group(0).split("stat\":")[1].split(",\"dynamic")[0];
            }
            JSONObject jsonObject = JSONObject.parseObject(info);
            String aid = jsonObject.getString("aid");
            int reply = jsonObject.getIntValue("reply");
            if (reply > 0) {
                KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
                // First page of top-level comments for this video (oid = aid).
                String commentUrl = String.format(commentUrlFormat, 1, aid);
                CrawlerRequestRecord cmtRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .recordKey(commentUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .resultLabelTag(comment)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .build();
                cmtRecord.setFilter(filterInfoRecord.getFilter());
                cmtRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                cmtRecord.getHttpRequest().addExtra("articleUrl", crawlerRequestRecord.getHttpRequest().getUrl());
                parsedLinks.add(cmtRecord);
            }
        }
        return parsedLinks;
    }

    /**
     * Parses a search-result page: enqueues the next result page plus one item
     * request per video in the list.
     *
     * @return parsedLinks with the next page and item requests appended
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = httpRequest.getUrl();
        Map<String, Object> urlParams = getUrlParams(lastRequestUrl);
        if (null != urlParams && urlParams.size() > 0) {
            int curPage = Integer.parseInt((String) urlParams.get("page"));
            // The keyword taken from the URL is still URL-encoded, which is what the format expects.
            String keyword = (String) urlParams.get("keyword");
            String nextPageUrl = String.format(searchUrlFormat, keyword, (curPage + 1));
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            parsedLinks.add(nextPageRecord);
        }
        List<Selectable> itemNodes = httpPage.getHtml().xpath("//ul[@class=\"video-list clearfix\"]/li").nodes();
        for (Selectable itemNode : itemNodes) {
            String itemUrl = itemNode.xpath("./a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }
            // Search results use protocol-relative links like //www.bilibili.com/video/...
            if (!itemUrl.startsWith("http")) {
                itemUrl = "https:" + itemUrl;
            }
            String pubDate = itemNode.xpath("./div[@class=\"info\"]/div[@class=\"tags\"]/span[@class=\"so-icon time\"]/text()").get();
            if (StringUtils.isBlank(pubDate)) {
                logger.error("this url [ {} ] get null pubDate", itemUrl);
                continue;
            }

            pubDate = pubDate.trim();
            try {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(DateUtils.parseDate(pubDate, "yyyy-MM-dd").getTime())
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                // Include the offending value, the URL and the exception so bad dates can be diagnosed.
                logger.error("parse date [{}] error for url [{}]", pubDate, itemUrl, e);
            }
        }
        return parsedLinks;
    }

    /**
     * Dispatches a downloaded page to the matching wash routine based on the
     * result data types tagged on the record.
     *
     * @return all CrawlerData produced for this page
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> results = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            results.addAll(washArticle(crawlerRequestRecord, httpPage));
        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)) {
            // Reply-of-reply pages share the comment data type but use a different layout.
            List<CrawlerData> commentData = requestUrl.matches(commentReplyUrlRegex)
                    ? washCommentReply(crawlerRequestRecord, httpPage)
                    : washComment(crawlerRequestRecord, httpPage);
            results.addAll(commentData);
        }
        return results;
    }

    /**
     * Washes a reply-of-reply JSON page into CrawlerData: one comment record plus
     * one interaction record per reply.
     * <p>
     * The parent article is identified via the "articleUrl" extra stored when the
     * article page was parsed.
     *
     * @return comment and interaction data for every reply on the page
     */
    public List<CrawlerData> washCommentReply(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerCommentDataList = new ArrayList<>();
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        JSONArray replies = pageObj.getJSONObject("data").getJSONArray("replies");
        Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
        String articleUrl = (String) extras.get("articleUrl");
        // e.g. https://www.bilibili.com/video/BVxxxx?p=1 -> BVxxxx
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1).split("\\?")[0];

        for (Object reply : replies) {
            JSONObject cmtObj = (JSONObject) reply;
            String commentId = cmtObj.getString("rpid");
            String content = cmtObj.getJSONObject("content").getString("message");
            String device = cmtObj.getJSONObject("content").getString("device");
            String author = cmtObj.getJSONObject("member").getString("uname");
            String authorId = cmtObj.getString("mid");
            String likes = cmtObj.getString("like");
            String comments = cmtObj.getString("rcount");
            String pubTime = cmtObj.getString("ctime");
            // ctime is epoch seconds; replaces the deprecated `new Long(pubTime + "000")`.
            long releaseTime = Long.parseLong(pubTime) * 1000L;

            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(articleUrl)
                    .dataId(StringUtils.joinWith("-", domain(), comment, commentId))
                    .parentId(StringUtils.joinWith("-", domain(), article, articleKey))
                    .releaseTime(releaseTime)
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .resultLabelTag(comment)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .build();
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_PostClient, device);
            crawlerCommentDataList.add(crawlerData);

            // The interaction record hangs off the comment it describes.
            CrawlerData crawlerInteractionData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(articleUrl)
                    .dataId(StringUtils.joinWith("-", domain(), interaction, commentId))
                    .parentId(StringUtils.joinWith("-", domain(), comment, commentId))
                    .releaseTime(releaseTime)
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Comments, comments)
                    .resultLabelTag(interaction)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .build();
            crawlerCommentDataList.add(crawlerInteractionData);
        }
        return crawlerCommentDataList;
    }

    /**
     * Washes a video detail page into an article CrawlerData, plus interaction
     * data when the record is tagged for it.
     *
     * @return article data (and optionally interaction data) for the page
     */
    public List<CrawlerData> washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        String lastRequestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        Html html = httpPage.getHtml();
        String title = html.xpath("//div[@id=\"viewbox_report\"]/h1//text()").get();
        // Constant-first equals is null-safe: the xpath above can return null,
        // which previously threw NullPointerException here.
        if ("活动作品".equals(title)) {
            // "活动作品" is an activity badge, not the real title; read the title attribute instead.
            title = httpPage.getHtml().xpath("//h1[@class=\"video-title\"]/@title").get();
        }
        // NOTE(review): assumed to cover both description layouts — confirm for all video pages.
        String content = html.xpath("//div[@class=\"info open\"]/text()|//div[@class=\"desc-info desc-v2 open\"]//text()").get();
        // Author info is embedded in the page's inline JSON between "upData": and ,"level_info.
        Matcher mtAInfo = Pattern.compile("\"upData\":(.*),\"level_info").matcher(httpPage.getRawText());
        String aInfo = "";
        while (mtAInfo.find()) {
            aInfo = mtAInfo.group(0).split("upData\":")[1].split(",\"level_info")[0] + "}";
        }
        JSONObject authorObj = JSONObject.parseObject(aInfo);
        String authorId = authorObj.getString("mid");
        String author = authorObj.getString("name");
        String fans = authorObj.getString("fans");
        String pubTime = html.xpath("//div[@class=\"video-data\"]/span[3]/text()").get();
        List<String> tags = html.xpath("//ul[@class=\"tag-area clearfix\"]/li//text()").all();
        List<String> allTags = new ArrayList<>();
        for (String tag : tags) {
            if (StringUtils.isNotBlank(tag.trim())) {
                allTags.add(tag.trim());
            }
        }

        // e.g. https://www.bilibili.com/video/BVxxxx?p=1 -> BVxxxx
        String articleKey = lastRequestUrl.substring(lastRequestUrl.lastIndexOf("/") + 1).split("\\?")[0];
        long releaseTime = crawlerRequestRecord.getReleaseTime();
        try {
            releaseTime = DateUtils.parseDate(pubTime, "yyyy-MM-dd HH:mm:ss").getTime();
        } catch (Exception e) {
            // Fall back to the record's release time; log value and cause for diagnosis.
            logger.error("parse date [{}] error for url [{}]", pubTime, lastRequestUrl, e);
        }
        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .url(lastRequestUrl)
                .releaseTime(releaseTime)
                .dataId(StringUtils.joinWith("-", domain(), article, articleKey))
                .addContentKV(Field_Title, title)
                .addContentKV(Field_Content, content)
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Author_Id, authorId)
                .addContentKV(Field_Author_Follows, fans)
                .resultLabelTag(article)
                .build();
        crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, allTags);
        crawlerArticleDataList.add(crawlerData);

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            crawlerArticleDataList.addAll(washInteraction(crawlerRequestRecord, httpPage));
        }
        return crawlerArticleDataList;
    }

    /**
     * Washes the engagement counters of a video page (inline "stat" JSON) into a
     * single interaction CrawlerData attached to the article.
     *
     * @return a one-element list with the interaction record
     */
    public List<CrawlerData> washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerInteractionDataList = new ArrayList<>();
        String lastRequestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        String articleKey = lastRequestUrl.substring(lastRequestUrl.lastIndexOf("/") + 1).split("\\?")[0];

        // Counters live in the inline JSON between "stat": and ,"dynamic".
        Matcher mtVInfo = Pattern.compile("\"stat\":(.*),\"dynamic\"").matcher(httpPage.getRawText());
        String vInfo = "";
        while (mtVInfo.find()) {
            vInfo = mtVInfo.group(0).split("stat\":")[1].split(",\"dynamic")[0];
        }
        JSONObject infoObj = JSONObject.parseObject(vInfo);
        String views = infoObj.getString("view");
        String collections = infoObj.getString("favorite");
        String likes = infoObj.getString("like");
        String comments = infoObj.getString("reply");
        String forwards = infoObj.getString("share");
        String coin = infoObj.getString("coin");

        Html html = httpPage.getHtml();
        String pubTime = html.xpath("//div[@class=\"video-data\"]/span[3]/text()").get();
        long releaseTime = crawlerRequestRecord.getReleaseTime();
        try {
            releaseTime = DateUtils.parseDate(pubTime, "yyyy-MM-dd HH:mm:ss").getTime();
        } catch (Exception e) {
            // Fall back to the record's release time; log value and cause for diagnosis.
            logger.error("parse date [{}] error for url [{}]", pubTime, lastRequestUrl, e);
        }
        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .url(lastRequestUrl)
                .dataId(StringUtils.joinWith("-", domain(), interaction, articleKey))
                .parentId(StringUtils.joinWith("-", domain(), article, articleKey))
                .releaseTime(releaseTime)
                .addContentKV(Field_I_Likes, likes)
                .addContentKV(Field_I_Comments, comments)
                .addContentKV(Field_I_Collection, collections)
                .addContentKV(Field_I_Forwards, forwards)
                .addContentKV(Field_I_Views, views)
                .addContentKV(Field_I_Coin_Operated, coin)
                .resultLabelTag(interaction)
                .build();
        crawlerInteractionDataList.add(crawlerData);
        return crawlerInteractionDataList;
    }

    /**
     * Washes a top-level comment JSON page into comment CrawlerData, plus one
     * interaction record per comment when the record is tagged for interactions.
     *
     * @return comment (and optional interaction) data for every reply on the page
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerCommentDataList = new ArrayList<>();
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        JSONArray replies = pageObj.getJSONObject("data").getJSONArray("replies");
        Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
        String articleUrl = (String) extras.get("articleUrl");
        // e.g. https://www.bilibili.com/video/BVxxxx?p=1 -> BVxxxx
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1).split("\\?")[0];

        // NOTE: the "comment_record_filter_info" tag used to be parsed here but its
        // result was never used; the dead read has been removed.
        for (Object reply : replies) {
            JSONObject cmtObj = (JSONObject) reply;
            String commentId = cmtObj.getString("rpid");
            String content = cmtObj.getJSONObject("content").getString("message");
            String device = cmtObj.getJSONObject("content").getString("device");
            String author = cmtObj.getJSONObject("member").getString("uname");
            String authorId = cmtObj.getString("mid");
            String likes = cmtObj.getString("like");
            String comments = cmtObj.getString("rcount");
            String pubTime = cmtObj.getString("ctime");
            // ctime is epoch seconds; replaces the deprecated `new Long(pubTime + "000")`.
            long releaseTime = Long.parseLong(pubTime) * 1000L;

            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(articleUrl)
                    .dataId(StringUtils.joinWith("-", domain(), comment, commentId))
                    .parentId(StringUtils.joinWith("-", domain(), article, articleKey))
                    .releaseTime(releaseTime)
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .resultLabelTag(comment)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .build();
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_PostClient, device);
            crawlerCommentDataList.add(crawlerData);

            if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
                CrawlerData crawlerInteractionData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .url(articleUrl)
                        .dataId(StringUtils.joinWith("-", domain(), interaction, commentId))
                        .parentId(StringUtils.joinWith("-", domain(), comment, commentId))
                        .releaseTime(releaseTime)
                        .addContentKV(Field_I_Likes, likes)
                        .addContentKV(Field_I_Comments, comments)
                        .resultLabelTag(interaction)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .build();
                crawlerCommentDataList.add(crawlerInteractionData);
            }
        }
        return crawlerCommentDataList;
    }

    /**
     * Registers every URL pattern this script is able to handle, in the same
     * order the patterns are declared above.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {keysRegex, indexRegex, listUrlRegex, articleUrlRegex, commentUrlRegex, commentReplyUrlRegex};
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Always accepts the record; this script performs no pre-crawl filtering.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        return true;
    }

    /**
     * No post-execution cleanup is needed for this script.
     */
    @Override
    public void afterExecute(CrawlerRecordContext context) {

    }

    /**
     * @return the site identifier used as the prefix of every generated data id
     */
    @Override
    public String domain() {
        return "bilibili";
    }

    /**
     * Splits the query string of a URL into a name/value map.
     * <p>
     * Values are left URL-encoded on purpose: callers such as parseListLinks feed
     * them straight back into new URLs.
     *
     * @param url e.g. http://x.y.com?aa=11&bb=22&cc=33
     * @return the parameter map, or null when the URL has no usable query string
     */
    public static Map<String, Object> getUrlParams(String url) {
        // indexOf/substring instead of split: split("\\?")[1] threw
        // ArrayIndexOutOfBoundsException for URLs ending in a bare "?".
        int queryStart = url.indexOf('?');
        String query = queryStart >= 0 ? url.substring(queryStart + 1) : null;
        if (query == null || query.trim().isEmpty()) {
            // Historical contract: null (not an empty map) signals "no parameters".
            return null;
        }
        Map<String, Object> map = new HashMap<>();
        for (String pair : query.split("&")) {
            // Limit 2 keeps values that themselves contain '='; plain split("=")
            // silently dropped such parameters.
            String[] kv = pair.split("=", 2);
            if (kv.length == 2) {
                map.put(kv[0], kv[1]);
            }
        }
        return map;
    }

    /**
     * Ad-hoc scratch entry point left in by the author to eyeball the reply
     * page-count calculation for a sample reply count of "25".
     */
    public static void main(String[] args) {
        String sampleCount = "25";
        int replyCount = Integer.parseInt(sampleCount);
        System.out.println(replyCount / 10 + 1);
    }
}
