package com.chance.cc.crawler.development.scripts.bitauto.video;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.DigestUtils;

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.text.ParseException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Field_Author_Follows;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Series;

/**
 * @author lt
 * @version 1.0
 * @date 2020-12-18 15:34:14
 * @email okprog@sina.com
 */
public class BitAutoVideoCrawlerScript extends CrawlerCommonScript {

    private Logger logger = LoggerFactory.getLogger(BitAutoVideoCrawlerScript.class);

    // URL patterns this script handles; used for registration (initUrlRegulars)
    // and for dispatch in parseLinks. Order of checks in parseLinks matters:
    // videoUrlRegexNoDate must be tested before videoUrlRegex (the former is a
    // superset marker "#no_date" appended to the latter).
    private static final String indexRegex = "https?://v\\.yiche\\.com/";
    private static final String listUrlRegex = "https?://v\\.yiche\\.com/cate_\\d*_\\d*_\\d*\\.html";
    private static final String videoUrlRegexNoDate = "https?://vc\\.yiche\\.com/vplay/\\d*\\.html#no_date";
    private static final String videoUrlRegex = "https?://vc\\.yiche\\.com/vplay/\\d*\\.html";
    private static final String viewsUrlRegex = "https://cmsapi\\.bitauto\\.com/videoforum/Promotion/GetVideoByVideoIds\\?vfIds=\\S*";
    private static final String likesUrlRegex = "https://vc\\.yiche\\.com/web_api/information_api/api/v1/support/support_info\\S*";
    private static final String commentUrlRegex = "https?://newsapi\\.yiche\\.com/comment/comment/getdata\\S*";

    // Templates for the requests this script generates. likesUrlFormat carries a
    // literal "#contentId" placeholder (replaced via String.replace) because the
    // param value is already percent-encoded JSON and must not go through
    // String.format.
    private static final String listUrlFormat = "https://v.yiche.com/cate_%s_0_%s.html";
    private static final String viewsUrlFormat = "https://cmsapi.bitauto.com/videoforum/Promotion/GetVideoByVideoIds?vfIds=%s";
    private static final String likesUrlFormat = "https://vc.yiche.com/web_api/information_api/api/v1/support/support_info?cid=508&param=%7B%22contentId%22%3A%22#contentId%22%2C%22contentType%22%3A%224%22%7D";
    private static final String commentUrlFormat = "https://newsapi.yiche.com/comment/comment/getdata?productId=%s&objectId=%s&pageIndex=%s&pageSize=%s&isHot=false&_=%s";

    // Value of the "site" category tag that routes records to this script.
    private static final String scriptSite = "video";

    /**
     * @return the crawler domain key this script is registered under.
     */
    @Override
    public String domain() {
        return "bitauto";
    }

    /**
     * Registers every URL pattern this script claims, in dispatch order.
     */
    @Override
    public void initUrlRegulars() {
        String[] patterns = {indexRegex, listUrlRegex, videoUrlRegexNoDate, videoUrlRegex, commentUrlRegex};
        for (String pattern : patterns) {
            addUrlRegular(pattern);
        }
    }

    /**
     * Accepts only records whose biz "site" category tag matches this script's
     * site ("video").
     *
     * @param crawlerRequestRecord incoming record to test
     * @return true when the record belongs to this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // Compare from the non-null constant side so a missing "site" tag yields
        // false instead of a NullPointerException.
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /**
     * Link-extraction entry point. Failed/empty downloads are pushed back for
     * retry with an attempt counter kept in the request extras (capped at 10,
     * 404s never retried); successful pages are dispatched to the parser
     * matching the request URL shape.
     *
     * @return new records to schedule; empty list when the retry budget is
     *         exhausted; null for URLs that match no known pattern
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<CrawlerRequestRecord>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        int statusCode = httpPage.getStatusCode();
        if (StringUtils.isBlank(httpPage.getRawText()) || !httpPage.isDownloadSuccess() || statusCode != 200) {
            Map<String, Object> extras = lastRequest.getExtras();
            int downloadTimes = 1;
            if (null == extras) {
                extras = new HashMap<>();
                extras.put("downloadTimes", downloadTimes);
                crawlerRequestRecord.getHttpRequest().setExtras(extras);
            } else {
                try {
                    // BUGFIX: the counter is stored as an Integer; the previous
                    // cast to String always threw ClassCastException, which
                    // reset the counter to 1 on every retry and defeated the
                    // 10-attempt cap below (unbounded retry loop).
                    downloadTimes = Integer.parseInt(String.valueOf(extras.get("downloadTimes")));
                    extras.put("downloadTimes", downloadTimes + 1);
                } catch (Exception e) {
                    extras.put("downloadTimes", downloadTimes);
                }
            }
            if (statusCode == 404 || downloadTimes > 10) {
                logger.error("页面不存在：{},本页面下载次数：{}", statusCode, downloadTimes);
                return parsedLinks;
            }

            // Push the record back for re-download; skip washing this round.
            parsedLinks.add(crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            logger.error("页面下载状态：{}，状态码：{}，内容为空：{}，实行回推", httpPage.isDownloadSuccess(), statusCode, StringUtils.isBlank(httpPage.getRawText()));
            return parsedLinks;
        }
        // Dispatch on URL shape. videoUrlRegexNoDate must precede videoUrlRegex
        // because the "#no_date" variant would also match the plain pattern.
        if (lastRequestUrl.matches(indexRegex)) {
            return parseCategoryLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(listUrlRegex)) {
            return parseListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(videoUrlRegexNoDate)) {
            return parseItemDateLinks(crawlerRequestRecord, httpPage, parsedLinks, lastRequestUrl);
        }
        if (lastRequestUrl.matches(videoUrlRegex)) {
            return parseItemLinks(crawlerRequestRecord, parsedLinks, lastRequest, lastRequestUrl);
        }
        if (lastRequestUrl.matches(commentUrlRegex)) {
            return parseCommentLinks(crawlerRequestRecord, httpPage, parsedLinks, lastRequest);
        }
        // NOTE(review): null return kept for backward compatibility — the
        // framework presumably tolerates it; an empty list would be safer.
        return null;
    }

    /**
     * Parses a comment-API response and, while more comments remain
     * (total &gt; pages fetched so far at 50 per page), schedules the next
     * comment page carrying the same extras with an incremented page counter.
     * Non-OK responses are pushed back unchanged for retry.
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, HttpRequest lastRequest) {
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        int code = pageObj.getIntValue("code");
        String msg = pageObj.getString("msg");
        if (code != 0 || !"OK".equals(msg)) {
            // API rejected the request — retry it without washing this round.
            parsedLinks.add(crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        JSONObject result = pageObj.getJSONObject("result");
        int size = result.getIntValue("size");
        int page = result.getIntValue("page");
        int total = result.getIntValue("total");
        // 50 is the pageSize requested in commentUrlFormat.
        int currentTotal = (page - 1) * 50 + size;
        if (total > currentTotal) {
            crawlerRequestRecord.setNeedWashPage(true);
            Map<String, Object> extras = lastRequest.getExtras();
            int currentNum = (int) extras.get("currentNum");
            String videoId = (String) extras.get("videoId");
            String productId = (String) extras.get("productId");
            String nextCommentUrl = String.format(commentUrlFormat, productId, videoId, (currentNum + 1), 50, System.currentTimeMillis());
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextCommentUrl)
                    .httpUrl(nextCommentUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .resultLabelTag(comment)
                    .resultLabelTag(interaction)
                    .needWashed(true)
                    .copyBizTags()
                    .copyScheduleTags()
                    .notFilterRecord()
                    .build();
            Map<String, Object> commentExtras = copyExtras(extras);
            commentExtras.put("currentNum", currentNum + 1);
            commentRecord.getHttpRequest().setExtras(commentExtras);
            parsedLinks.add(commentRecord);

        }
        return parsedLinks;
    }

    /**
     * For a video item page, schedules the auxiliary requests that complete the
     * item's data: the play-count API, the like-count API (with signed
     * headers), and the first page of its comments.
     */
    private List<CrawlerRequestRecord> parseItemLinks(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> parsedLinks, HttpRequest lastRequest, String lastRequestUrl) {
        // Video id is the numeric file name of ".../vplay/<id>.html".
        String videoId = lastRequestUrl.substring(lastRequestUrl.lastIndexOf("/") + 1).split("\\.")[0];
        // Internal download: play/reply counts, consumed in afterInternalDownload.
        String viewsUrl = String.format(viewsUrlFormat, videoId);
        CrawlerRequestRecord viewsRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .recordKey(viewsUrl)
                .httpUrl(viewsUrl)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .releaseTime(System.currentTimeMillis())
                .build();
        parsedLinks.add(viewsRecord);

        // Internal download: like count. Plain replace (not String.format)
        // because the param value is percent-encoded JSON.
        String likesUrl = likesUrlFormat.replace("#contentId", videoId);
        CrawlerRequestRecord likesRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .recordKey(likesUrl)
                .httpUrl(likesUrl)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .releaseTime(System.currentTimeMillis())
                .build();
        // The likes API requires signed headers: x-sign is derived from the
        // query string + timestamp (see generatorSignFromUrl).
        String timestamp = String.valueOf(System.currentTimeMillis());
        String sign = generatorSignFromUrl(likesUrl, timestamp);
        HttpRequest likesRequest = likesRecord.getHttpRequest();
        likesRequest.addHeader("Host", "vc.yiche.com");
        likesRequest.addHeader("Referer", lastRequestUrl);
        likesRequest.addHeader("User-Agent", getRandomUA());
        likesRequest.addHeader("x-platform", "pc");
        likesRequest.addHeader("x-sign", sign);
        likesRequest.addHeader("x-timestamp", timestamp);
        likesRequest.addHeader("x-user-guid", UUID.randomUUID().toString());
        parsedLinks.add(likesRecord);

        // 评论的翻页 — first comment page; productId 7 appears fixed for this
        // site. Pagination continues in parseCommentLinks via the extras below.
        Map<String, Object> extras = lastRequest.getExtras();
        String commentUrl = String.format(commentUrlFormat, 7, videoId, 1, 50, System.currentTimeMillis());
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .recordKey(commentUrl)
                .httpUrl(commentUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .resultLabelTag(comment)
                .resultLabelTag(interaction)
                .needWashed(true)
                .copyBizTags()
                .copyScheduleTags()
                .notFilterRecord()
                .build();
        Map<String, Object> commentExtras = copyExtras(extras);
        commentExtras.put("currentNum", 1);
        commentExtras.put("productId", 7);
        commentExtras.put("videoId", videoId);
        commentRecord.getHttpRequest().setExtras(commentExtras);

        parsedLinks.add(commentRecord);
        return parsedLinks;
    }

    /**
     * Builds the x-sign header for the likes API: md5 over the decoded query
     * parameters, a fixed salt and the timestamp.
     *
     * @param url       likes request url carrying "cid" and "param" query parameters
     * @param timestamp value also sent as the x-timestamp header
     * @return hex md5 signature, or a random UUID-derived fallback when the
     *         url lacks the expected parameters or cannot be decoded
     */
    private String generatorSignFromUrl(String url, String timestamp) {
        Map<String, Object> urlParams = getUrlParams(url);
        try {
            // Guard cid/param explicitly: the previous version would NPE inside
            // URLDecoder.decode if "param" was absent, and NPE is not caught below.
            if (null != urlParams && urlParams.get("cid") != null && urlParams.get("param") != null) {
                // Fixed constant mixed into the sign — presumably a client-side
                // salt taken from the site's JS; TODO confirm if the site changes it.
                String u = "19DDD1FBDFF065D3A4DA777D2D7A81EC";
                String cid = (String) urlParams.get("cid");
                String param = URLDecoder.decode((String) urlParams.get("param"), "utf-8");
                String s = "cid=" + cid + "&param=" + param + u + timestamp;
                return DigestUtils.md5DigestAsHex(s.getBytes());
            }
        } catch (UnsupportedEncodingException e) {
            // Was logger.error(e.getMessage(), "...") — message used as the
            // SLF4J format string and the exception dropped.
            logger.error("url decode error: {}", url, e);
        }
        return UUID.randomUUID().toString().replaceAll("-", "");
    }

    /**
     * First pass over a video page (the "#no_date" request): extracts the
     * publish time from the page and re-emits the item record with the real
     * release time, reusing the already-downloaded page (setDownload(false)).
     */
    private List<CrawlerRequestRecord> parseItemDateLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, String lastRequestUrl) {
        String publishTime = httpPage.getHtml().xpath("//div[@class=\"video-content-box\"]/div/div/span[1]/text()").get();
        String itemUrl = lastRequestUrl.split("#")[0];
        // DateUtils.parseDate(null) throws IllegalArgumentException, which the
        // ParseException catch below would not cover — guard explicitly.
        if (StringUtils.isBlank(publishTime)) {
            logger.error("publish time not found on page: {}", lastRequestUrl);
            return parsedLinks;
        }
        try {
            long releaseTime = DateUtils.parseDate(publishTime, "yyyy-MM-dd HH:mm:ss").getTime();
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(itemUrl)
                    .httpUrl(itemUrl)
                    .releaseTime(releaseTime)
                    .copyResultTags()
                    .copyBizTags()
                    .copyScheduleTags()
                    .build();
            // Reuse the page we already have instead of downloading again.
            itemRecord.setInternalDownloadPage(httpPage);
            itemRecord.setDownload(false);
            itemRecord.getHttpRequest().setExtras(copyExtras(crawlerRequestRecord.getHttpRequest().getExtras()));
            parsedLinks.add(itemRecord);
        } catch (ParseException e) {
            // Was logger.error(e.getMessage(), "...") — message used as the
            // SLF4J format string and the exception dropped.
            logger.error("parse date error, publishTime={}", publishTime, e);
        }
        return parsedLinks;
    }

    /**
     * Parses a category list page: schedules the next list page (up to page
     * 100) and one "#no_date" item record per video box on the page.
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String lastRequestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        // "/cate_<cateId>_0_<page>.html" -> ["cate", cateId, "0", page]
        String[] cateSplit = lastRequestUrl.substring(lastRequestUrl.lastIndexOf("/")).split("\\.")[0].split("_");
        String cateId = cateSplit[1];
        int currentPage = Integer.parseInt(cateSplit[3]);
        if (currentPage < 100) {
            String nextPageUrl = String.format(listUrlFormat, cateId, (currentPage + 1));
            CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyScheduleTags()
                    .notFilterRecord()
                    .build();
            parsedLinks.add(turnPageRequest);
        }

        // One item record per video box; "#no_date" routes the first fetch
        // through parseItemDateLinks to resolve the real publish time.
        List<Selectable> itemNodes = httpPage.getHtml().xpath("//div[@class=\"v-box\"]").nodes();
        for (Selectable itemNode : itemNodes) {
            String itemUrl = "https:" + itemNode.xpath("./div/div/a/@href").get() + "#no_date";
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(itemUrl)
                    .httpUrl(itemUrl)
                    .releaseTime(System.currentTimeMillis())
                    .resultLabelTag(article)
                    .resultLabelTag(interaction)
                    .copyBizTags()
                    .copyScheduleTags()
                    .needWashed(false)
                    .notFilterRecord()
                    .build();
            // listUrl/itemUrl travel with the item and end up in the washed data.
            Map<String, Object> extras = new HashMap<>();
            extras.put("listUrl", lastRequestUrl);
            extras.put("itemUrl", itemUrl);
            itemRecord.getHttpRequest().setExtras(extras);
            parsedLinks.add(itemRecord);
        }
        return parsedLinks;
    }

    /**
     * Parses the site index: one list request per secondary-nav category whose
     * link matches the list-url pattern (after normalizing the bitauto host to
     * yiche), plus a catch-all "全部视频" (all videos) category.
     */
    private List<CrawlerRequestRecord> parseCategoryLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        List<Selectable> pageTagNodes = httpPage.getHtml().xpath("//div[@class=\"container secondary-nav\"]/ul/li").nodes();
        List<Selectable> cateNodes = new ArrayList<>();
        // Keep only nav entries whose href is a category list url.
        for (Selectable pageTagNode : pageTagNodes) {
            String pageTagUrl = "https:" + pageTagNode.xpath("./a/@href").get();
            if (pageTagUrl.replace("bitauto", "yiche").matches(listUrlRegex)) {
                cateNodes.add(pageTagNode);
            }
        }
        for (Selectable cateNode : cateNodes) {
            String listUrl = "https:" + cateNode.xpath("./a/@href").get();
            listUrl = listUrl.replace("bitauto", "yiche");
            String category = cateNode.xpath("./a/text()").get();
            CrawlerRequestRecord cateRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(listUrl)
                    .recordKey(listUrl)
                    .releaseTime(System.currentTimeMillis())
                    .needParsed(true)
                    .needWashed(false)
                    .copyBizTags()
                    .copyScheduleTags()
                    .notFilterRecord()
                    .build();
            List<String> path = new ArrayList<>();
            // Category labels may contain an HTML-escaped suffix ("&..."); keep
            // only the part before the ampersand.
            if (category.contains("&")) {
                category = category.split("&")[0];
            }
            path.add(category.trim());
            cateRecord.tagsCreator().bizTags().addCustomKV(Field_Path, path);
            parsedLinks.add(cateRecord);
        }
        // Always also crawl the aggregate "all videos" category.
        String listUrl = "https://v.yiche.com/cate_0_0_0.html";
        List<String> path = new ArrayList<>();
        path.add("全部视频");
        CrawlerRequestRecord cateRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(listUrl)
                .recordKey(listUrl)
                .releaseTime(System.currentTimeMillis())
                .needParsed(true)
                .needWashed(false)
                .copyBizTags()
                .copyScheduleTags()
                .notFilterRecord()
                .build();
        cateRecord.tagsCreator().bizTags().addCustomKV(Field_Path, path);
        parsedLinks.add(cateRecord);
        return parsedLinks;
    }

    /**
     * Merges the internally-downloaded views/likes API responses into the
     * parent video record's extras ("views", "comments", "likes"), which the
     * wash step later reads. Parse failures fall back to "0" for views/comments;
     * likes is simply left unset on API failure.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        HttpRequest httpRequest = crawlerRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> extras = httpRequest.getExtras();
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            HttpRequest recordHttpRequest = internalDownloadRecord.getHttpRequest();
            String recordHttpRequestUrl = recordHttpRequest.getUrl();
            if (httpRequestUrl.matches(videoUrlRegex)) {
                if (recordHttpRequestUrl.matches(viewsUrlRegex)) {
                    try {
                        // Views API returns a JSON array; the single element holds
                        // PlayCount / ReplyCount.
                        JSONArray pageArray = JSONObject.parseArray(internalDownloadRecord.getInternalDownloadPage().getRawText());
                        JSONObject pageObj = pageArray.getJSONObject(0);
                        String views = pageObj.getString("PlayCount");
                        String comments = pageObj.getString("ReplyCount");
                        extras.put("views", views);
                        extras.put("comments", comments);
                    } catch (Exception e) {
                        // Best-effort: default to "0" rather than failing the item.
                        extras.put("views", "0");
                        extras.put("comments", "0");
                    }
                }
                if (recordHttpRequestUrl.matches(likesUrlRegex)) {
                    JSONObject pageObject = JSONObject.parseObject(internalDownloadRecord.getInternalDownloadPage().getRawText());
                    if (pageObject.getIntValue("status") == 1 && "success".equalsIgnoreCase(pageObject.getString("message"))) {
                        try {
                            JSONObject dataObj = pageObject.getJSONObject("data");
                            String likes = dataObj.getString("supportCount");
                            extras.put("likes", likes);
                        } catch (Exception e) {
                            extras.put("likes", "0");
                        }
                    }
                }
            }
        }
    }

    /**
     * Dispatches washing by the data-type tags on the record. A record may
     * carry several tags (e.g. article + interaction), so every matching
     * washer contributes to the combined result list.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            crawlerDataList.addAll(washArticle(crawlerRequestRecord, httpPage));
        }

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            crawlerDataList.addAll(washInteraction(crawlerRequestRecord, httpPage));
        }

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)) {
            crawlerDataList.addAll(washComment(crawlerRequestRecord, httpPage));
        }

        return crawlerDataList;
    }

    /**
     * Washes a video page into a single article CrawlerData: title, author
     * (+id extracted from inline JS), concatenated text content, image urls
     * (\x01-separated), fan count, plus car-series tags when present.
     */
    private List<CrawlerData> washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        Html pageHtml = new Html(httpPage.getRawText());
        String title = pageHtml.xpath("//div[@class=\"video-title-box clearfix\"]/h2/text()").get();
        title = unescapeHtml2J(title);
        String author = pageHtml.xpath("//div[@class=\"video-user-info\"]/a/text()").get();
        // Author id only appears in inline JS as userId: '<digits>'; the loop
        // keeps the last match found on the page.
        Matcher authorIdMt = Pattern.compile("userId:\\s*'\\d*'").matcher(httpPage.getRawText());
        String authorId = "";
        while (authorIdMt.find()) {
            authorId = authorIdMt.group(0).split("'")[1];
        }
        List<String> allContents = pageHtml.xpath("//div[@id=\"videoContent\"]//text()").all();
        StringBuffer content = new StringBuffer();
        for (String allContent : allContents) {
            content = content.append(allContent);
        }

        // Image urls joined with the \x01 separator expected downstream.
        List<String> allImages = pageHtml.xpath("//div[@id=\"video-container\"]/img/@src").all();
        StringBuffer imageUrl = new StringBuffer();
        for (String allImage : allImages) {
            imageUrl = imageUrl.append(allImage).append("\\x01");
        }
        String fans = pageHtml.xpath("//p[@id=\"fansCount\"]/text()").get();

        // Content id is the numeric file name of ".../vplay/<id>.html".
        String contentId = httpRequest.getUrl().substring(httpRequest.getUrl().lastIndexOf("/") + 1).split("\\.")[0];
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .url(httpRequest.getUrl())
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), contentId))
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_Title, title)
                .addContentKV(Field_Content, unescapeHtml2J(content.toString()))
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Author_Id, authorId)
                .addContentKV(Field_Author_Follows, fans)
                .addContentKV(Field_Urls, (String) extras.get("listUrl"))
                .addContentKV(Field_Images, imageUrl.toString())
                .resultLabelTag(article)
                .build();
        crawlerData.tagsCreator().bizTags().addSiteBiz("video");

        //车系信息 (car-series info shown under the video, if any)
        List<Map<String, String>> seriesList = new ArrayList<>();
        List<Selectable> seriesNodes = pageHtml.xpath("//div[@class=\"tempWrap\"]/ul/li").nodes();
        if (null != seriesNodes && seriesNodes.size() > 0) {
            for (Selectable seriesNode : seriesNodes) {
                String seriesId = seriesNode.xpath("./a[@target]/@data-id").get();
                String seriesUrl = seriesNode.xpath("./a[@target]/@href").get();
                String seriesName = seriesNode.xpath("./a[@target]/h3/text()").get();
                Map<String, String> seriesInfo = new HashMap<>();
                seriesInfo.put("series_name", seriesName);
                seriesInfo.put("series_url", seriesUrl);
                seriesInfo.put("series_id", seriesId);
                seriesList.add(seriesInfo);
            }
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Series, seriesList);
        }
        crawlerDataList.add(crawlerData);
        return crawlerDataList;
    }

    /**
     * Repeatedly HTML-unescapes a string so multiply-escaped text (e.g.
     * "&amp;amp;lt;") resolves fully; capped at 6 passes.
     *
     * @param str possibly escaped text; null is returned unchanged (xpath
     *            .get() can yield null for a missing node)
     * @return unescaped text, or the input when nothing needed unescaping
     */
    public static String unescapeHtml2J(String str) {
        if (str == null) {
            // Previous version threw NullPointerException on str.contains().
            return null;
        }
        int times = 0;
        while (str.contains("&") && str.contains(";")) {
            String unescaped = StringEscapeUtils.unescapeHtml(str);
            if (unescaped.equals(str)) {
                // No entities left — the '&'/';' are literal text; previously
                // the loop spun 6 no-op passes here.
                break;
            }
            str = unescaped;
            times++;
            if (times > 5) {
                break;
            }
        }
        return str;
    }

    /**
     * Washes interaction (views/likes/comment-count) data. Two sources:
     * a video page (counts pre-collected into extras by afterInternalDownload,
     * comment count overridden by the on-page "N弹幕" figure when present), or
     * a comment-API page (per-comment like counts).
     */
    private List<CrawlerData> washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = httpRequest.getUrl();
        Map<String, Object> extras = httpRequest.getExtras();
        Html pageHtml = httpPage.getHtml();
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (lastRequestUrl.matches(videoUrlRegex)) {
            String contentId = httpRequest.getUrl().substring(httpRequest.getUrl().lastIndexOf("/") + 1).split("\\.")[0];
            String comments = (String) extras.get("comments");
            String likes = (String) extras.get("likes");
            String views = (String) extras.get("views");

            // Prefer the on-page danmaku count over the API reply count; the
            // loop keeps the last match on the page.
            Matcher mtCmt = Pattern.compile("\\d*弹幕").matcher(httpPage.getRawText());
            while (mtCmt.find()) {
                comments = mtCmt.group(0).split("弹")[0];
            }

            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(httpRequest.getUrl())
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), contentId))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), contentId))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_I_Comments, comments)
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Views, views)
                    .resultLabelTag(interaction)
                    .build();
            crawlerData.tagsCreator().bizTags().addSiteBiz("video");
            crawlerDataList.add(crawlerData);
        }
        if (lastRequestUrl.matches(commentUrlRegex)) {
            //解析列表 (walk the comment list, one interaction record per comment)
            JSONObject resultObj = JSONObject.parseObject(httpPage.getRawText()).getJSONObject("result");
            JSONArray commentsList = resultObj.getJSONArray("list");
            for (Object commentItem : commentsList) {
                JSONObject commentObj = (JSONObject) commentItem;
                String contentId = commentObj.getString("id");
                String likes = commentObj.getString("likeCount");
                String publishTime = commentObj.getString("createTime");
                long releaseTime = 0;
                try {
                    // Fall back to the record's release time when createTime is missing.
                    releaseTime = StringUtils.isBlank(publishTime) ? crawlerRequestRecord.getReleaseTime() : DateUtils.parseDate(publishTime, "yyyy-MM-dd HH:mm:ss").getTime();
                } catch (ParseException e) {
                    logger.warn(e.getMessage(), "parse date error");
                    releaseTime = crawlerRequestRecord.getReleaseTime();
                }
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .url((String) extras.get("itemUrl"))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), contentId))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), contentId))
                        .releaseTime(releaseTime)
                        .addContentKV(Field_I_Likes, likes)
                        .resultLabelTag(interaction)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .build();
                crawlerData.setFilter(CrawlerEnum.CrawlerRecordFilter.dateRange);
                crawlerData.tagsCreator().bizTags().addSiteBiz("video");
                crawlerDataList.add(crawlerData);
            }
        }

        return crawlerDataList;
    }

    /**
     * Washes a comment-API response into one comment CrawlerData per entry;
     * each is parented to the article record of the video ("videoId" extra).
     */
    private List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String articleKey = (String) extras.get("videoId");
        //解析列表 (walk the comment list)
        JSONObject resultObj = JSONObject.parseObject(httpPage.getRawText()).getJSONObject("result");
        JSONArray commentsList = resultObj.getJSONArray("list");
        for (Object commentItem : commentsList) {
            JSONObject commentObj = (JSONObject) commentItem;
            String contentId = commentObj.getString("id");
            String content = commentObj.getString("content");
            String author = commentObj.getString("showName");
            String authorId = commentObj.getString("userId");
            String floor = commentObj.getString("floor");

            String publishTime = commentObj.getString("createTime");
            long releaseTime = 0;
            try {
                // Fall back to the record's release time when createTime is missing.
                releaseTime = StringUtils.isBlank(publishTime) ? crawlerRequestRecord.getReleaseTime() : DateUtils.parseDate(publishTime, "yyyy-MM-dd HH:mm:ss").getTime();
            } catch (ParseException e) {
                logger.warn(e.getMessage(), "parse date error");
                releaseTime = crawlerRequestRecord.getReleaseTime();
            }
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url((String) extras.get("itemUrl"))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), contentId))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .releaseTime(releaseTime)
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Floor, floor)
                    .resultLabelTag(comment)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .build();
            crawlerData.setFilter(CrawlerEnum.CrawlerRecordFilter.dateRange);
            crawlerData.tagsCreator().bizTags().addSiteBiz("video");
            crawlerDataList.add(crawlerData);
        }
        return crawlerDataList;
    }


    /**
     * No-op: this script requires no post-execution cleanup.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // intentionally empty
    }

    /**
     * Returns a shallow, independent copy of the given extras map so new
     * records can mutate their extras without affecting the source record.
     *
     * @param inExtras map to copy; null yields an empty map (previously NPE'd)
     * @return new mutable HashMap with the same entries
     */
    public static Map<String, Object> copyExtras(Map<String, Object> inExtras) {
        if (inExtras == null) {
            return new HashMap<>();
        }
        // Copy constructor replaces the manual entry-by-entry loop.
        return new HashMap<>(inExtras);
    }

    /**
     * 将url参数转换成map
     *
     * @param url http://*.*.com?aa=11&bb=22&cc=33
     * @return map
     */
    private Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<String, Object>(0);
        String param = null;
        if (url.contains("?")) {
            param = url.split("\\?")[1];
        }
        if (StringUtils.isBlank(param)) {
            return null;
        }
        String[] params = param.split("&");
        for (String s : params) {
            String[] p = s.split("=");
            if (p.length == 2) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

    private static List<String> agentList = new ArrayList<>();

    static {
        agentList.add("Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.1");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/536.6");
        agentList.add("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/536.6");
        agentList.add("Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.1");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)");
        agentList.add("Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.28.3 (KHTML, like Gecko) Version/3.2.3 ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/525.28.3");
        agentList.add("Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16");
        agentList.add("Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14");
        agentList.add("Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14");
        agentList.add("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14");
        agentList.add("Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02");
        agentList.add("Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0");
        agentList.add("Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.13 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400");
    }

    /**
     * Picks a random User-Agent string from {@link #agentList}.
     *
     * <p>Bug fix: {@code RandomUtils.nextInt(start, end)} treats {@code end} as
     * EXCLUSIVE, so the previous bound of {@code size() - 1} could never return
     * the last entry of the pool. The bound is now {@code size()}, giving every
     * entry an equal chance of selection.
     *
     * @return a randomly selected User-Agent header value
     */
    private static String getRandomUA() {
        return agentList.get(RandomUtils.nextInt(0, agentList.size()));
    }
}
