package com.chance.cc.crawler.development.scripts.tenxun;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URLEncoder;
import java.text.ParseException;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Field_Author_Follows;

/**
 * @author lt
 * @version 1.0
 * @date 2021-04-14 20:28:22
 * @email okprog@sina.com
 */
public class TenXunVideoCrawlerScript extends CrawlerCommonScript {


    /** Class logger; final — loggers are constants and must never be reassigned. */
    private static final Logger logger = LoggerFactory.getLogger(TenXunVideoCrawlerScript.class);

    /** Crawler domain identifier; also the first segment of every dataId this script emits. */
    public static final String domain = "tenxun";
    /** The biz "site" tag value this script accepts; checked in crawlerCheck. */
    private static final String scriptSite = "video";

    // Url patterns used to route a downloaded page to the matching parser (see parseLinks).
    public static final String indexRegex = "https://v\\.qq\\.com/";
    public static final String keysRegex = "https?://\\S*v1/meta/" + domain + "/keys\\S*";
    public static final String searchKwListUrlRegex = "https://v\\.qq\\.com/x/search/\\S*";
    public static final String articleUrlRegex = "https?://v\\.qq\\.com/x/page/\\S*\\.html";
    public static final String commentIdUrlRegex = "https://access\\.video\\.qq\\.com/fcgi-bin/video_comment_id\\S*";
    public static final String commentsUrlRegex = "https://video\\.coral\\.qq\\.com/article/\\d*/commentnum\\S*";
    public static final String authorInfoUrlRegex = "https://nodeyun\\.video\\.qq\\.com/x/api/msite/cp_module\\S*";
    public static final String commentUrlRegex = "https://video\\.coral\\.qq\\.com/varticle/\\S*/comment/v2\\S*";

    // Url templates: the search template uses #kw/#pn placeholders (replaced literally),
    // the others are String.format templates with %s slots.
    public static final String searchKwListUrlFormat = "https://v.qq.com/x/search/?searchSession=tabid=%E5%85%A8%E9%83%A8%7C0&firstTabid=%E5%85%A8%E9%83%A8%7C0&q=#kw&preQid=e87C5-KEl6O-IyuhmQUfMDXklQdZyGfvQsJBlkQOWxox5vjrShAbLQ&queryFrom=4&cur=#pn&isNeedQc=false&filterValue=firstTabid=0%26sortTabid=1%26tabid=0%26timeLongTabid=0%26publishTimeTabid=2";
    public static final String articleUrlFormat = "https://v.qq.com/x/page/%s.html";
    public static final String commentIdUrlFormat = "https://access.video.qq.com/fcgi-bin/video_comment_id?op=3&vappid=30645497&vsecret=d38052bb634963e03eca5ce3aaf93525324d970f110f585f&vid=%s";
    public static final String commentsUrlFormat = "https://video.coral.qq.com/article/%s/commentnum?_=%s";
    public static final String authorInfoUrlFormat = "https://nodeyun.video.qq.com/x/api/msite/cp_module?vid=%s";
    public static final String commentUrlFormat = "https://video.coral.qq.com/varticle/%s/comment/v2?oriorder=t&pageflag=1&cursor=%s"; // args: comment target id, cursor ("last" id of the previous page)

    /**
     * Builds the initial search-list requests from a keyword source record.
     * When the first support record is a keys feed (keysRegex), each keyword in
     * its json payload becomes one page-1 search-list request carrying the
     * keyword as a biz tag and a random mobile User-Agent.
     *
     * @param requestRecord        the triggering request record
     * @param supportSourceRecords support records; index 0 may hold the keyword feed
     * @return list requests built from keywords, or the superclass default when
     *         there are no support records / nothing could be built
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> allItemRecords = new ArrayList<>();
        if (supportSourceRecords == null || supportSourceRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        CrawlerRequestRecord keywordRecord = supportSourceRecords.get(0);
        String keywordUrl = keywordRecord.getHttpRequest().getUrl();
        if (keywordUrl.matches(keysRegex)) {
            try {
                JSONObject jsonObject = JSONObject.parseObject(keywordRecord.getInternalDownloadPage().getRawText());
                // status 0 means the keys feed responded successfully
                if (jsonObject.getIntValue("status") == 0) {
                    JSONArray objects = jsonObject.getJSONArray("content");
                    for (Object object : objects) {
                        String keyword = ((JSONObject) object).getString("keyword");
                        // substitute the keyword (url-encoded) and start at page 1
                        String listUrl = searchKwListUrlFormat
                                .replace("#kw", URLEncoder.encode(keyword, "utf-8"))
                                .replace("#pn", "1");
                        CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                                .turnPageRequest(requestRecord)
                                .httpUrl(listUrl)
                                .recordKey(listUrl)
                                .releaseTime(System.currentTimeMillis())
                                .notFilterRecord()
                                .copyBizTags()
                                .build();
                        listRecord.tagsCreator().bizTags().addKeywords(keyword);
                        listRecord.getHttpRequest().addHeader("User-Agent", getRandomUA());
                        allItemRecords.add(listRecord);
                    }
                }
            } catch (Exception e) {
                // keep the stack trace together with the context message
                logger.error("from keywords init urls failed", e);
            }
        }
        if (allItemRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        return allItemRecords;
    }

    /**
     * Routes a downloaded page to the matching link parser based on its url.
     * 404 pages are dropped; incomplete downloads are re-queued for retry.
     *
     * @param crawlerRequestRecord the record whose page was downloaded
     * @param httpPage             the downloaded page
     * @return follow-up request records; empty when the page yields none
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        if (404 == httpPage.getStatusCode()) {
            // gone pages are dropped outright, not retried
            logger.info("status code is 404");
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        if (doHttpPageCheck(crawlerRequestRecord, httpPage)) {
            // broken download: re-queue the record itself for another attempt
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            parsedLinks.add(crawlerRequestRecord);
            return parsedLinks;
        }
        String lastRequestUrl = lastRequest.getUrl();
        if (lastRequestUrl.matches(searchKwListUrlRegex)) {
            return parseListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(articleUrlRegex)) {
            return parseArticleLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(commentIdUrlRegex)) {
            return parseCommentIdLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(commentUrlRegex)) {
            return parseCommentLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        // BUGFIX: previously returned null for unmatched urls; every other path
        // returns a (possibly empty) list, so stay consistent and null-safe.
        return parsedLinks;
    }

    /**
     * Parses a coral comment-list json page and, when the payload reports more
     * pages ("hasnext"), schedules the next comment page using the "last"
     * cursor. On a parse failure the record is re-queued for retry.
     *
     * @return parsedLinks, possibly extended with the next comment-page request
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        try {
            crawlerRequestRecord.setNeedWashPage(true);
            JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
            JSONObject dataObj = pageObj.getJSONObject("data");
            String commentId = dataObj.getString("targetid");
            boolean hasNext = dataObj.getBooleanValue("hasnext");
            if (hasNext) {
                // "last" is the pagination cursor of the final comment on this page
                String last = dataObj.getString("last");
                String commentUrl = String.format(commentUrlFormat, commentId, last);
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .recordKey(commentUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .resultLabelTag(comment)
                        .resultLabelTag(interaction)
                        .build();
                commentRecord.getHttpRequest().setExtras(copyExtras(crawlerRequestRecord.getHttpRequest().getExtras()));
                parsedLinks.add(commentRecord);
            }
        } catch (Exception e) {
            // BUGFIX: was silently swallowed — log before re-queuing the record
            logger.error("parse comment page failed: " + crawlerRequestRecord.getHttpRequest().getUrl(), e);
            crawlerRequestRecord.setNeedWashPage(false);
            parsedLinks.add(crawlerRequestRecord);
        }
        return parsedLinks;
    }

    /**
     * Handles the video_comment_id response: always schedules two internal
     * downloads (comment count + author info) and, when comment data is
     * requested by the schedule tags, the first comment-list page (cursor 0)
     * carrying the filter info recorded in the biz tags.
     *
     * @return parsedLinks extended with the scheduled follow-up requests
     */
    private List<CrawlerRequestRecord> parseCommentIdLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        crawlerRequestRecord.setNeedWashPage(true);
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        try {
            // response is jsonp-like; drop the 5-char callback prefix before parsing
            JSONObject pageObject = JSONObject.parseObject(httpPage.getRawText().substring(5));
            String commentId = pageObject.getString("comment_id");
            String articleKey = pageObject.getString("srcid");

            // internal download: comment count
            String commentsUrl = String.format(commentsUrlFormat, commentId, System.currentTimeMillis());
            CrawlerRequestRecord commentsRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(commentsUrl)
                    .recordKey(commentsUrl)
                    .releaseTime(System.currentTimeMillis())
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            // internal download: author info
            String authorInfoUrl = String.format(authorInfoUrlFormat, articleKey);
            CrawlerRequestRecord authorInfoRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(authorInfoUrl)
                    .recordKey(authorInfoUrl)
                    .releaseTime(System.currentTimeMillis())
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            parsedLinks.add(commentsRecord);
            parsedLinks.add(authorInfoRecord);

            CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
            if (categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null) {
                // comment crawling needs the filter info previously stored in the biz tags
                if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
                    logger.error("tenxun video crawler comment need to filter information!");
                    return parsedLinks;
                }
                KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
                // first comment-list page, cursor 0
                String commentUrl = String.format(commentUrlFormat, commentId, 0);
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .recordKey(commentUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .needWashed(true)
                        .resultLabelTag(comment)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .build();
                commentRecord.getHttpRequest().setExtras(copyExtras(extras));
                commentRecord.setFilter(filterInfoRecord.getFilter());
                commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                parsedLinks.add(commentRecord);
            }
        } catch (Exception e) {
            crawlerRequestRecord.setNeedWashPage(false);
            // BUGFIX: keep the stack trace; e.getMessage() alone loses the failure location
            logger.error(e.getMessage(), e);
        }
        return parsedLinks;
    }

    /**
     * Extracts title/summary/view-count from an article page into request extras,
     * then schedules the comment-id lookup that drives the rest of the pipeline.
     *
     * @return parsedLinks extended with the comment-id request
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        // article key is the file name of the url: .../page/<key>.html
        String articleKey = httpRequestUrl.substring(httpRequestUrl.lastIndexOf("/") + 1).split("\\.")[0];
        // wash the display fields first and stash them in the extras
        Html html = httpPage.getHtml();
        List<String> titles = html.xpath("//h1[contains(@class,\"video_title\")]//text()").all();
        // StringBuilder instead of StringBuffer: no shared mutation, no need for synchronization
        StringBuilder sbTitle = new StringBuilder();
        for (String title : titles) {
            sbTitle.append(title.trim());
        }
        List<String> contents = html.xpath("//div[contains(@class,\"video_summary\")]//p//text()").all();
        StringBuilder sbContent = new StringBuilder();
        for (String content : contents) {
            sbContent.append(content.trim());
        }
        String views = html.xpath("//meta[@itemprop=\"interactionCount\"]/@content").get();
        httpRequest.addExtra("title", sbTitle.toString());
        httpRequest.addExtra("content", sbContent.toString());
        httpRequest.addExtra("views", views);

        String commentIdUrl = String.format(commentIdUrlFormat, articleKey);
        CrawlerRequestRecord commentIdRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(commentIdUrl)
                .recordKey(commentIdUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .needWashed(true)
                .needParsed(true)
                .notFilterRecord()
                .copyResultTags()
                .copyBizTags()
                .build();
        commentIdRecord.getHttpRequest().setExtras(copyExtras(httpRequest.getExtras()));
        commentIdRecord.getHttpRequest().addExtra("articleKey", articleKey);
        commentIdRecord.getHttpRequest().addExtra("articleUrl", httpRequestUrl);
        parsedLinks.add(commentIdRecord);
        return parsedLinks;
    }

    /**
     * Parses a search-result list page: schedules the next list page when a
     * "next" control exists, and one article request per result item whose
     * publish time could be cleaned.
     *
     * @return parsedLinks extended with the next-page and article requests
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> urlParams = getUrlParams(httpRequestUrl);
        Html html = httpPage.getHtml();
        String next = html.xpath("//a[@class=\"page_next\"]/text()").get();
        List<Selectable> itemNodes = html.xpath("//div[contains(@class,\"result_item\")]").nodes();
        if (null != urlParams) {
            String kw = (String) urlParams.get("q");
            String cur = (String) urlParams.get("cur");
            // defend against a list url missing "cur": treat it as page 1 instead of NPE/NFE
            int pn = StringUtils.isBlank(cur) ? 1 : Integer.parseInt(cur);
            if (StringUtils.isNotBlank(next) && !itemNodes.isEmpty()) {
                pn += 1;
                String listUrl = searchKwListUrlFormat.replace("#kw", kw).replace("#pn", String.valueOf(pn));
                CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(listUrl)
                        .recordKey(listUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .build();
                parsedLinks.add(listRecord);
            }
            for (Selectable itemNode : itemNodes) {
                String articleUrl = itemNode.xpath("./h2/a/@href").get();
                String pubTime = itemNode.xpath("./div/div/div[@class=\"info_item info_item_odd\"]/span[@class=\"content\"]/text()").get();
                if (StringUtils.isBlank(articleUrl) || !articleUrl.matches(articleUrlRegex)) {
                    continue;
                }
                long releaseTime = 0;
                try {
                    releaseTime = cleanTime(pubTime, "yyyy-MM-dd");
                } catch (Exception e) {
                    // unparseable publish time → skip this item, but keep the stack trace
                    logger.error(e.getMessage(), e);
                    continue;
                }
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(articleUrl)
                        .recordKey(articleUrl)
                        .releaseTime(releaseTime)
                        .needParsed(true)
                        .needWashed(false)
                        .copyBizTags()
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build();
                parsedLinks.add(itemRecord);
            }
        }
        return parsedLinks;
    }

    /**
     * Merges the internal-download results (comment count, author info) into the
     * main record's extras before washing. A failed internal download re-queues
     * the main record and stops processing the remaining internal records.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            HttpRequest httpRequest = internalDownloadRecord.getHttpRequest();
            HttpPage internalDownloadPage = internalDownloadRecord.getInternalDownloadPage();
            if (!internalDownloadPage.isDownloadSuccess()) {
                logger.error("internal link {} ,download failed", internalDownloadPage.getRequest().getUrl());
                crawlerRecord.setNeedWashPage(false);
                crawlerRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                links.add(crawlerRecord);
                break;
            }
            String requestUrl = httpRequest.getUrl();
            if (requestUrl.matches(commentsUrlRegex)) {
                // comment count is best-effort: default to "0" when the payload is unusable
                try {
                    JSONObject pageObj = JSONObject.parseObject(internalDownloadPage.getRawText());
                    JSONObject dataObj = pageObj.getJSONObject("data");
                    String comments = dataObj.getString("commentnum");
                    crawlerRecord.getHttpRequest().addExtra("comments", comments);
                } catch (Exception e) {
                    crawlerRecord.getHttpRequest().addExtra("comments", "0");
                }
            }
            if (requestUrl.matches(authorInfoUrlRegex)) {
                try {
                    JSONObject pageObj = JSONObject.parseObject(internalDownloadPage.getRawText());
                    JSONObject dataObj = pageObj.getJSONArray("blocks").getJSONObject(0);
                    String user_desc = dataObj.getString("user_desc");
                    String account_id = dataObj.getString("account_id");
                    String user_name = dataObj.getString("user_name");
                    crawlerRecord.getHttpRequest().addExtra("follows", parseFollows(user_desc));
                    crawlerRecord.getHttpRequest().addExtra("authorId", account_id);
                    crawlerRecord.getHttpRequest().addExtra("author", user_name);
                } catch (Exception e) {
                    // author block missing/unparseable: fall back to empty values, but log why
                    logger.error("parse author info failed", e);
                    crawlerRecord.getHttpRequest().addExtra("follows", "");
                    crawlerRecord.getHttpRequest().addExtra("authorId", "");
                    crawlerRecord.getHttpRequest().addExtra("author", "");
                }
            }

        }
    }

    /**
     * Converts a fan-count description such as "1.5万粉丝" or "3200粉丝" into a
     * plain number string; returns "0" when the text is not a fan count.
     */
    private static String parseFollows(String userDesc) {
        if (userDesc == null || !userDesc.endsWith("粉丝")) {
            return "0";
        }
        String count = userDesc.split("粉")[0];
        if (count.contains("万")) {
            // BUGFIX: the "万" (×10000) unit must be stripped before parsing —
            // Double.parseDouble("1.5万") throws NumberFormatException, which
            // previously sent every 万-scaled author into the failure branch.
            return String.valueOf((int) (Double.parseDouble(count.replace("万", "")) * 10000));
        }
        return count;
    }

    /**
     * Washes a downloaded page into structured {@link CrawlerData} items.
     * For article-tagged records the fields pre-collected in the extras become
     * an article datum (plus an interaction datum with views/comments). For
     * comment-tagged records the coral comment json is converted into comment
     * and interaction data, including one level of replies.
     *
     * @param crawlerRequestRecord record carrying extras collected upstream
     * @param httpPage             the downloaded page (article html or comment json)
     * @return washed data items; empty when the page is blank
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String articleKey = (String) extras.get("articleKey");
        String articleUrl = (String) extras.get("articleUrl");
        String rawText = httpPage.getRawText();
        if (StringUtils.isBlank(rawText)) {
            logger.error("httpPage is empty !");
            return crawlerDataList;
        }
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            // article fields were placed in the extras by parseArticleLinks / afterInternalDownload
            String title = (String) extras.get("title");
            String content = (String) extras.get("content");
            String views = (String) extras.get("views");
            String comments = (String) extras.get("comments");
            String follows = (String) extras.get("follows");
            String author = (String) extras.get("author");
            String authorId = (String) extras.get("authorId");

            CrawlerData crawlerAData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(articleUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .dataId(StringUtils.joinWith("-", domain(), site, article, articleKey))
                    .addContentKV(Field_Title, unescapeHtml2J(title))
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Author_Follows, follows)
                    .addContentKV(Field_Content, content)
                    .resultLabelTag(article)
                    .build();
            crawlerDataList.add(crawlerAData);
            if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
                CrawlerData crawlerIData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .url(articleUrl)
                        .releaseTime(System.currentTimeMillis())
                        .dataId(StringUtils.joinWith("-", domain(), site, interaction, articleKey))
                        .parentId(StringUtils.joinWith("-", domain(), site, article, articleKey))
                        .addContentKV(Field_I_Comments, comments)
                        .addContentKV(Field_I_Views, views)
                        .resultLabelTag(interaction)
                        .build();
                crawlerDataList.add(crawlerIData);
            }
        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)) {
            JSONObject pageObj = JSONObject.parseObject(rawText);
            JSONObject dataObj = pageObj.getJSONObject("data");
            // comment list, reply lists keyed by comment id, and user info list
            JSONArray commentsLists = dataObj.getJSONArray("oriCommList");
            JSONObject repCommList = dataObj.getJSONObject("repCommList");
            JSONObject userList = dataObj.getJSONObject("userList");
            for (Object commentsList : commentsLists) {
                JSONObject commentObj = (JSONObject) commentsList;
                String commentId = commentObj.getString("id");
                String authorId = commentObj.getString("userid");
                String author = getAuthorName(authorId, userList);
                // "time" is epoch seconds; appending "000" yields epoch millis
                String time = commentObj.getString("time") + "000";
                String content = commentObj.getString("content");
                String likes = commentObj.getString("up");
                String comments = commentObj.getString("repnum");

                CrawlerData crawlerCData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .url(articleUrl)
                        .releaseTime(Long.parseLong(time))
                        .dataId(StringUtils.joinWith("-", domain(), site, comment, commentId))
                        .parentId(StringUtils.joinWith("-", domain(), site, article, articleKey))
                        .addContentKV(Field_Content, content)
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Author_Id, authorId)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .resultLabelTag(comment)
                        .build();
                crawlerDataList.add(crawlerCData);

                if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
                    CrawlerData crawlerIData = CrawlerData.builder()
                            .data(crawlerRequestRecord, httpPage)
                            .url(articleUrl)
                            .releaseTime(Long.parseLong(time))
                            .dataId(StringUtils.joinWith("-", domain(), site, interaction, commentId))
                            .parentId(StringUtils.joinWith("-", domain(), site, comment, commentId))
                            .addContentKV(Field_I_Likes, likes)
                            .addContentKV(Field_I_Comments, comments)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .resultLabelTag(interaction)
                            .build();
                    crawlerDataList.add(crawlerIData);
                }

                // wash the inline replies; guard against missing repnum / reply list (NPE before)
                if (StringUtils.isNotBlank(comments) && Integer.parseInt(comments) > 0) {
                    JSONArray replies = repCommList == null ? null : repCommList.getJSONArray(commentId);
                    if (replies != null) {
                        for (Object reply : replies) {
                            JSONObject replyObj = (JSONObject) reply;
                            crawlerDataList.addAll(getRepliesDatas(crawlerRequestRecord, httpPage, replyObj, userList, articleUrl, articleKey, site));
                        }
                    }
                }

            }
        }
        return crawlerDataList;
    }

    /**
     * Converts one reply json object into a comment datum (parented to the
     * article, matching the original layout) plus an optional interaction datum.
     *
     * @param commentObj one entry from the "repCommList" reply array
     * @param userList   user-info json used to resolve the author nickname
     * @return the washed data items for this reply
     */
    private List<CrawlerData> getRepliesDatas(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, JSONObject commentObj, JSONObject userList, String articleUrl, String articleId, String site) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String commentId = commentObj.getString("id");
        String authorId = commentObj.getString("userid");
        String author = getAuthorName(authorId, userList);
        // "time" is epoch seconds; appending "000" yields epoch millis
        String time = commentObj.getString("time") + "000";
        String content = commentObj.getString("content");
        String likes = commentObj.getString("up");
        String comments = commentObj.getString("repnum");

        CrawlerData crawlerCData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .url(articleUrl)
                .releaseTime(Long.parseLong(time))
                .dataId(StringUtils.joinWith("-", domain(), site, comment, commentId))
                .parentId(StringUtils.joinWith("-", domain(), site, article, articleId))
                .addContentKV(Field_Content, content)
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Author_Id, authorId)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                .resultLabelTag(comment)
                .build();
        crawlerDataList.add(crawlerCData);

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            CrawlerData crawlerIData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(articleUrl)
                    .releaseTime(Long.parseLong(time))
                    .dataId(StringUtils.joinWith("-", domain(), site, interaction, commentId))
                    .parentId(StringUtils.joinWith("-", domain(), site, comment, commentId))
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Comments, comments)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .resultLabelTag(interaction)
                    .build();
            crawlerDataList.add(crawlerIData);
        }
        return crawlerDataList;
    }

    /**
     * Resolves a commenter's nickname from the "userList" json object.
     *
     * @param uid      user id used as the key into userList
     * @param userList json object mapping user id to user info
     * @return the "nick" field of the user, or "" when the user is unknown
     */
    private String getAuthorName(String uid, JSONObject userList) {
        JSONObject user = userList.getJSONObject(uid);
        return null == user ? "" : user.getString("nick");
    }

    /**
     * Registers the url patterns this script responds to. The two internal
     * download urls (comment count, author info) are deliberately not listed
     * here, matching the original registration set.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {
                indexRegex,
                keysRegex,
                searchKwListUrlRegex,
                articleUrlRegex,
                commentIdUrlRegex,
                commentUrlRegex
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Restricts this script to records whose biz "site" tag equals "video".
     *
     * @return true when the record belongs to this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // BUGFIX: compare from the non-null constant side so a record without a
        // "site" tag is rejected instead of throwing NullPointerException
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /**
     * Post-execution hook; intentionally a no-op for this script.
     */
    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // nothing to clean up after a record is executed
    }

    /**
     * @return the crawler domain identifier ("tenxun") used in dataId prefixes
     */
    @Override
    public String domain() {
        return domain;
    }

    /**
     * Checks whether the downloaded page is broken (wrong status code, failed
     * download, or empty body).
     *
     * @param crawlerRequestRecord the record whose page is being validated
     * @param httpPage             the downloaded page
     * @return true when the page is unusable and should be retried, false otherwise
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String url = crawlerRequestRecord.getHttpRequest().getUrl();
        int code = httpPage.getStatusCode();
        if (code != 200) {
            logger.error("download page {} error, status code is {}", url, code);
            return true;
        }
        if (!httpPage.isDownloadSuccess()) {
            logger.error("download page failed, check your link {}", url);
            return true;
        }
        if (StringUtils.isBlank(httpPage.getRawText())) {
            logger.error("download page empty, check your link {}", url);
            return true;
        }
        return false;
    }

    /**
     * Splits a url's query string into a key/value map.
     * Pairs without exactly one '=' are skipped.
     *
     * @param url e.g. http://a.b.com?aa=11&bb=22&cc=33
     * @return parameter map, or null when there is no query string
     *         (callers rely on the null return — do not change to an empty map)
     */
    private Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<String, Object>(0);
        String param = url;
        // BUGFIX: url.split("\\?")[1] threw ArrayIndexOutOfBoundsException for a
        // url ending in '?'; indexOf/substring also keeps everything after the
        // first '?' instead of stopping at a second one.
        int queryStart = url.indexOf('?');
        if (queryStart >= 0) {
            param = url.substring(queryStart + 1);
        }
        if (StringUtils.isBlank(param)) {
            return null;
        }
        String[] params = param.split("&");
        for (String s : params) {
            String[] p = s.split("=");
            if (p.length == 2) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

    /**
     * Returns an independent shallow copy of the given extras map, so follow-up
     * requests can mutate their extras without affecting the source record.
     *
     * @param inExtras source map; may be null (some records have no extras yet)
     * @return a new mutable map with the same entries; empty when inExtras is null
     */
    public static Map<String, Object> copyExtras(Map<String, Object> inExtras) {
        // null-safe: the previous manual loop threw NPE when a record had no extras
        if (inExtras == null) {
            return new HashMap<>();
        }
        return new HashMap<>(inExtras);
    }

    /** Pool of mobile User-Agent strings; final — populated once in the static initializer below. */
    private static final List<String> agentList = new ArrayList<>();

    static {
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/72.0.3626.101 Mobile/15E148 Safari/605.1");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/15.0b13894 Mobile/16D57 Safari/605.1.15");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/8.1.1 Mobile/16D57 Safari/605.1.15");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/16.0.14.122053 Mobile/16D57 Safari/9537.53");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPT/2 Mobile/16D57");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/12.0.5.3 Version/7.0 Mobile/16D57 Safari/9537.53");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 EdgiOS/42.10.3 Mobile/16D57 Safari/605.1.15");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 unknown BingWeb/6.9.8.1");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 MQQBrowser/9.0.3 Mobile/16D57 Safari/604.1 MttCustomUA/2 QBWebViewType/1 WKType/1");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 SearchCraft/3.4.1 (Baidu; P2 12.1.4)");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X; zh-CN) AppleWebKit/537.51.1 (KHTML, like Gecko) Mobile/16D57 UCBrowser/12.3.0.1138 Mobile AliApp(TUnionSDK/0.1.20.3)");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X; zh-cn) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/16D57 Quark/3.0.6.926 Mobile");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 MicroMessenger/7.0.3(0x17000321) NetType/WIFI Language/zh_CN");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16A366 QQ/7.8.8.420 V1_IPH_SQ_7.8.8_1_APP_A Pixel/1125 Core/WKWebView Device/Apple(iPhone X) NetType/4G QBWebViewType/1 WKType/1");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12. Mobile/16D57 Safari/600.1.4 baidubrowser/4.14.1.11 (Baidu; P2 12.1.4)");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 baiduboxapp/11.3.6.10 (Baidu; P2 12.1.4)");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/606.4.5 (KHTML, like Gecko) Mobile/16D57 QHBrowser/317 QihooBrowser/4.0.10");
        agentList.add("Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 Mb2345Browser/5.2.1");
    }

    /**
     * Picks a random User-Agent string from {@link #agentList}.
     *
     * <p>Fix: {@code RandomUtils.nextInt(startInclusive, endExclusive)} treats
     * the upper bound as exclusive, so the previous {@code size() - 1} bound
     * could never select the last entry of the list.
     *
     * @return a randomly chosen User-Agent string
     */
    private static String getRandomUA() {
        return agentList.get(RandomUtils.nextInt(0, agentList.size()));
    }

    /**
     * Repeatedly HTML-unescapes a string so multiply-encoded entities
     * (e.g. {@code &amp;amp;lt;}) are fully resolved, capped at a handful of
     * passes to guarantee termination.
     *
     * @param str raw text, possibly HTML-escaped more than once
     * @return the fully unescaped text
     */
    public static String unescapeHtml2J(String str) {
        int times = 0;
        while (str.contains("&") && str.contains(";")) {
            String unescaped = StringEscapeUtils.unescapeHtml(str);
            // Stop as soon as a pass changes nothing: plain text that merely
            // contains '&' and ';' would otherwise spin through 6 useless passes.
            if (unescaped.equals(str)) {
                break;
            }
            str = unescaped;
            times++;
            if (times > 5) {
                break;
            }
        }
        return str;
    }

    /**
     * Converts a Chinese relative or absolute publish-time string into epoch
     * milliseconds.
     *
     * Handled forms: "刚刚/今天/1天内" (just now / today), "昨天HH:mm" /
     * "前天HH:mm" (yesterday / the day before), "N单位前" (N units ago, via
     * {@link #timeBefore(String)}), "M月d日", "yyyy年MM月dd日", and any
     * dash-separated date matching {@code pattern}.
     *
     * @param timeStr raw time text, optionally prefixed like "发布：..."
     * @param pattern date pattern used for dash-separated dates (e.g. "yyyy-MM-dd")
     * @return epoch milliseconds, or 0 when no form matches
     * @throws ParseException if DateUtils fails to parse an absolute date
     */
    private long cleanTime(String timeStr,String pattern) throws ParseException {
        // Strip a "发布：" ("published:") label; note the full-width colon.
        if (timeStr.contains("发布")){
            timeStr = timeStr.split("：")[1];
        }
        // "just now" / "today" / "within 1 day": approximate as 5 minutes ago.
        if (timeStr.contains("刚刚") || timeStr.contains("今天") || timeStr.contains("1天内")){
            return System.currentTimeMillis() - 5 * 60 * 1000;
        }else if (timeStr.contains("昨天") || timeStr.contains("前天")){
            // "昨天" = yesterday (-1 day), "前天" = day before yesterday (-2 days).
            int amount = 0;
            if (timeStr.startsWith("昨")){
                amount = -1;
            }
            if (timeStr.startsWith("前")){
                amount = -2;
            }
            try {
                // Text after "天" is expected to be an "HH:mm" clock time.
                String time = timeStr.split("天")[1];
                int hour = Integer.parseInt(time.split(":")[0]);
                int minute = Integer.parseInt(time.split(":")[1]);
                Calendar calendar = Calendar.getInstance();
                calendar.add(Calendar.DATE, amount);
                calendar.set(Calendar.HOUR_OF_DAY,hour);
                calendar.set(Calendar.MINUTE,minute);
                return calendar.getTimeInMillis();
            } catch (Exception e) {
                // No usable clock part: fall back to the shifted date at the
                // current time of day.
                Calendar calendar = Calendar.getInstance();
                calendar.add(Calendar.DATE,amount);
                return calendar.getTimeInMillis();
            }
        }else if (timeStr.endsWith("前")){
            // Relative "N units ago" forms, e.g. "5分钟前".
            return timeBefore(timeStr);
        }else if (timeStr.matches("\\d*月\\d*日")){
            // "M月d日": month/day in the current year.
            String[] strings = timeStr.split("月");
            int month = Integer.parseInt(strings[0]);
            int day = Integer.parseInt(strings[1].split("日")[0]);
            Calendar calendar = Calendar.getInstance();
            // Calendar months are 0-based.
            calendar.set(Calendar.MONTH,month - 1);
            calendar.set(Calendar.DAY_OF_MONTH,day);
            return calendar.getTimeInMillis();

        }else if (timeStr.matches("\\d*年\\d*月\\d*日")){
            // Fully qualified "yyyy年MM月dd日" date.
            return DateUtils.parseDate(timeStr,"yyyy年MM月dd日").getTime();
        }else if (timeStr.contains("-")){
            // Dash-separated date parsed with the caller-supplied pattern.
            return DateUtils.parseDate(timeStr,pattern).getTime();
        }else {
            // Unrecognized format: signal "unknown" with 0.
            return 0;
        }

    }

    // Duration constants in milliseconds for relative-time arithmetic.
    private static final long ONE_SECOND = 1000L;
    private static final long ONE_MINUTE = 60000L;
    private static final long ONE_HOUR = 3600000L;
    private static final long ONE_DAY = 86400000L;

    /**
     * Converts a Chinese "N units ago" string (e.g. "5分钟前", "3小时前",
     * "2天前") into epoch milliseconds relative to now.
     *
     * <p>Fix: the regexes previously used {@code \d*}, which also matches zero
     * digits — a degenerate string such as "天前" then reached
     * {@code Integer.parseInt("")} and threw {@link NumberFormatException}.
     * {@code \d+} requires at least one digit, so malformed input now falls
     * through to the 0 default instead of throwing.
     *
     * @param timeStr relative time text ending with "前" ("ago")
     * @return epoch milliseconds, or 0 when the format is not recognized
     */
    private long timeBefore(String timeStr) {
        if (timeStr.matches("\\d+天前")){
            // "N days ago"
            int timeNum = Integer.parseInt(timeStr.split("天")[0]);
            return System.currentTimeMillis() - (timeNum * ONE_DAY);

        }else if (timeStr.matches("\\d+秒前")){
            // "N seconds ago"
            int timeNum = Integer.parseInt(timeStr.split("秒")[0]);
            return System.currentTimeMillis() - (timeNum * ONE_SECOND);

        }else if (timeStr.matches("\\d+分钟前")){
            // "N minutes ago"
            int timeNum = Integer.parseInt(timeStr.split("分钟")[0]);
            return System.currentTimeMillis() - (timeNum * ONE_MINUTE);

        }else if (timeStr.matches("\\d+分钟\\d+秒前")) {
            // "N minutes M seconds ago"
            String[] split = timeStr.split("分钟");
            int minutes = Integer.parseInt(split[0]);
            int seconds = Integer.parseInt(split[1].split("秒")[0]);
            long times = (minutes * ONE_MINUTE) + (seconds * ONE_SECOND);
            return System.currentTimeMillis() - times;

        }else if (timeStr.matches("\\d+小时前")){
            // "N hours ago"
            int timeNum = Integer.parseInt(timeStr.split("小时")[0]);
            return System.currentTimeMillis() - (timeNum * ONE_HOUR);

        }else if (timeStr.matches("\\d+小时\\d+分钟前")){
            // "N hours M minutes ago"
            String[] split = timeStr.split("小时");
            int hours = Integer.parseInt(split[0]);
            int minutes = Integer.parseInt(split[1].split("分钟")[0]);
            long times = (hours * ONE_HOUR) + (minutes * ONE_MINUTE);
            return System.currentTimeMillis() - times;

        }else if (timeStr.matches("\\d+周前")){
            // "N weeks ago"
            int timeNum = Integer.parseInt(timeStr.split("周")[0]);
            return System.currentTimeMillis() - (timeNum * ONE_DAY * 7);

        }else if (timeStr.matches("\\d+个月前")){
            // "N months ago"; a month is approximated as 31 days.
            int timeNum = Integer.parseInt(timeStr.split("个")[0]);
            return System.currentTimeMillis() - (timeNum * ONE_DAY * 31);
        }else {
            // Unrecognized format: signal "unknown" with 0.
            return 0;

        }
    }
}
