package com.chance.cc.crawler.development.scripts.dcdapp;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Field_Author_Follows;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Series;

/**
 * @author lt
 * @version 1.0
 * @date 2021-05-10 17:32:17
 * @email okprog@sina.com
 */
public class DcdAppNewsCrawlerScript extends CrawlerCommonScript {

    // SLF4J logger; final per convention so it cannot be reassigned.
    private static final Logger logger = LoggerFactory.getLogger(DcdAppNewsCrawlerScript.class);
    private static final String DOMAIN = "dcdapp";
    private static final String ARTICLE = "article";
    private static final String VIDEO = "video";
    private static final String URL_DOMAIN = "dongchedi";
    private static final String Tag_Category = "tag_category";

    // --- URL patterns used to route downloaded pages in parseLinks/washPage ---
    public static final String IndexUrl = "https://www.dongchedi.com/";
    public static final String keysRegex = "https?://\\S*v1/meta/" + DOMAIN + "/keys\\S*";
    public static final String commonIndexUrlRegex = "https://www\\.dongchedi\\.com/feed";
    public static final String commonIndexUrlCmtRegex = "https://www\\.dongchedi\\.com/feed#comment";
    public static final String indexUrlRegex = "https://www\\.dongchedi\\.com/auto/series/\\S*";
    public static final String feedListUrlRegex = "https://www\\.(dongchedi|dcdapp)\\.com/motor/stream_entrance/get_feed/v47/\\S*tt_from=\\S*";
    public static final String seriesListUrlRegex = "https://www\\.(dongchedi|dcdapp)\\.com/motor/stream_entrance/get_feed/v47/\\S*concern_id\\S*";
    public static final String itemUrlRegex = "https?://www\\.(dongchedi|dcdapp)\\.com/motor/pc/common/article/detail\\S*";
    public static final String commentUrlRegex = "https?://www\\.(dongchedi|dcdapp)\\.com/motor/proxy/comment_list/article/v3/tab_comments/\\S*";
    public static final String replyUrlRegex = "https?://www\\.(dongchedi|dcdapp)\\.com/motor/proxy/reply_list/2/comment/v3/reply_list/\\S*";
    public static final String seriesInfoUrlRegex = "https://www\\.(dongchedi|dcdapp)\\.com/motor/pc/content/related_series\\S*";

    // --- URL format templates used to build outgoing requests ---
    public static final String indexUrlFormat = "https://www.dongchedi.com/auto/series/%s";
    // Series-page article list URL. Placeholders: domain, series id, category, web id, category, sub-tab, behot time.
    public static final String seriesListUrlFormat = "https://www.%s.com/motor/stream_entrance/get_feed/v47/?" +
            "concern_id=%s&category=%s&web_id=%s&device_platform=web&channel=web&tt_from=enter_auto&" +
            "motor_feed_extra_params={\"new_feed\":true,\"series_page_sort\":\"latest\"}&sub_category=%s&" +
            "impression_info={\"page_id\":\"page_car_series\",\"sub_tab\":\"%s\"}&max_behot_time=%s&aid=1839";
    // Section feed list URL. Placeholders: domain, tt_from, category, behot time, sub-tab, web id, behot time.
    public static final String feedListUrlFormat = "https://www.%s.com/motor/stream_entrance/get_feed/v47/?tt_from=%s&" +
            "sub_category=&category=%s&count=20&max_behot_time=%s&" +
            "impression_info={\"page_id\":\"page_car_series\",\"sub_tab\":\"%s\",\"product_name\":\"web\"}&" +
            "aid=1839&refer=1&channel=web&device_platform=web&web_id=%s&" +
            "motor_feed_extra_params={\"new_feed\": true, \"feed_type\": 1}&source=pc&max_behot_time=%s";
    public static final String itemUrlFormat = "https://www.%s.com/motor/pc/common/article/detail?group_id=%s";
    public static final String commentUrlFormat = "https://www.%s.com/motor/proxy/comment_list/article/v3/tab_comments/?aid=1839&app_name=auto_web_pc&group_id=%s&id=%s&offset=%s&count=50";
    public static final String replyUrlFormat = "https://www.%s.com/motor/proxy/reply_list/2/comment/v3/reply_list/?aid=1839&app_name=auto_web_pc&group_id=%s&id=%s&offset=%s&count=50";
    public static final String seriesInfoUrlFormat = "https://www.%s.com/motor/pc/content/related_series?group_id=%s";

    private static final String scriptSite = "Common_A_V";
    private static final String load_more = "load_more";
    private static final String enter_auto = "enter_auto";

    /**
     * Prepares the initial crawl records for a request.
     * <p>
     * Two cases are handled:
     * <ol>
     *   <li>The record carries a {@code Tag_Field_Domain_Result_Json} tag: the embedded JSON
     *       payload supplies the article URL, release time, keyword and site, which are copied
     *       onto the request before the tag is removed.</li>
     *   <li>Support source records matching {@code keysRegex} are expanded into one
     *       car-series index page request per keyword.</li>
     * </ol>
     * Falls back to the superclass behavior when no support records produced anything.
     *
     * @param requestRecord        the incoming request record
     * @param supportSourceRecords auxiliary records (keyword lists), may be null or empty
     * @return the records to crawl next
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> allItemRecords = new ArrayList<>();
        HttpRequest httpRequest = requestRecord.getHttpRequest();
        if (requestRecord.tagsCreator().bizTags().hasKVTag(Tag_Field_Domain_Result_Json)) {
            KVTag domainResultJson = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(Tag_Field_Domain_Result_Json);
            CrawlerDomainUrls crawlerDomainUrls = JSON.parseObject(String.valueOf(domainResultJson.getVal()), CrawlerDomainUrls.class);
            // The "url" field holds a nested JSON payload, not a plain URL.
            String url = crawlerDomainUrls.getUrl();
            JSONObject contentObj = JSONObject.parseObject(url);
            String articleUrl = contentObj.getString(Field_Urls);
            Long releaseTime = contentObj.getLong("releaseTime");
            String keyword = contentObj.getString("keyword");
            String site = contentObj.getString("site");
            if (StringUtils.isNotBlank(keyword)) {
                requestRecord.tagsCreator().bizTags().addKeywords(keyword);
            }
            requestRecord.setReleaseTime(releaseTime);
            requestRecord.setNeedParsedPage(true);
            requestRecord.tagsCreator().resultTags().addResultDataType(article);
            requestRecord.tagsCreator().resultTags().addResultDataType(interaction);
            requestRecord.tagsCreator().bizTags().addSite(site);
            httpRequest.setUrl(articleUrl);
            // Remove the consumed tag so it is not re-processed downstream.
            requestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(Tag_Field_Domain_Result_Json);
        }
        if (supportSourceRecords == null || supportSourceRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String supportUrl = supportSourceRecord.getHttpRequest().getUrl();
            if (supportUrl.matches(keysRegex)) {
                try {
                    JSONObject jsonObject = JSONObject.parseObject(supportSourceRecord.getInternalDownloadPage().getRawText());
                    if (jsonObject.getIntValue("status") == 0) {
                        JSONArray objects = jsonObject.getJSONArray("content");
                        for (Object object : objects) {
                            String keyword = ((JSONObject) object).getString("keyword");
                            String indexUrl = String.format(indexUrlFormat, keyword);
                            CrawlerRequestRecord indexRecord = CrawlerRequestRecord.builder()
                                    .turnPageRequest(requestRecord)
                                    .httpUrl(indexUrl)
                                    .recordKey(indexUrl)
                                    .releaseTime(System.currentTimeMillis())
                                    .notFilterRecord()
                                    .copyBizTags()
                                    .build();
                            indexRecord.tagsCreator().bizTags().addKeywords(keyword);
                            allItemRecords.add(indexRecord);
                        }
                    }
                } catch (Exception e) {
                    // A malformed support record must not abort the whole batch;
                    // log message and stack trace in a single call.
                    logger.error("from keywords init urls failed", e);
                }
            }
        }
        if (allItemRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        return allItemRecords;
    }

    /**
     * Routes a downloaded page to the matching link-extraction handler based on the request URL.
     * <p>
     * 404 pages and pages failing {@code doHttpPageCheck} short-circuit without washing.
     * A URL matching {@code commonIndexUrlCmtRegex} is throttled with a 1s pause before routing.
     *
     * @param crawlerRequestRecord the record whose page was downloaded
     * @param httpPage             the downloaded page
     * @return parsed follow-up requests; null when no handler matches
     *         (NOTE(review): preserved from original — confirm callers accept null
     *         before changing this to an empty list)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        if (404 == httpPage.getStatusCode()) {
            logger.info("status code is 404");
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        if (doHttpPageCheck(crawlerRequestRecord, httpPage)) {
            // Page check failed: re-queue the same record without the filter flag.
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            parsedLinks.add(crawlerRequestRecord);
            return parsedLinks;
        }
        String lastRequestUrl = lastRequest.getUrl();
        if (lastRequestUrl.matches(commonIndexUrlCmtRegex)) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so upstream executors can observe cancellation.
                Thread.currentThread().interrupt();
                logger.error(e.getMessage(), e);
            }
        }
        if (lastRequestUrl.matches(commonIndexUrlRegex)) {
            return parseCommonIndexLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(feedListUrlRegex) && !lastRequestUrl.contains("concern_id")) {
            return parseFeedListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(seriesListUrlRegex)) {
            return parseSeriesListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(itemUrlRegex)) {
            return parseItemLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(commentUrlRegex)) {
            return parseCommentLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(indexUrlRegex)) {
            return genSeriesListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return null;
    }

    /**
     * Builds one feed-list request per content tab ("all" and "video") for a single
     * car-series index page, attaching browser-like headers to each request.
     *
     * @param crawlerRequestRecord the series index page record (its URL ends with the series id)
     * @param httpPage             the downloaded index page (unused here)
     * @param parsedLinks          accumulator the new requests are added to
     * @return the same accumulator, now containing one record per tab
     */
    private List<CrawlerRequestRecord> genSeriesListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        // Series id is the final path segment of the index URL.
        String seriesId = requestUrl.substring(requestUrl.lastIndexOf('/') + 1);
        // 10-digit behot time (epoch seconds) expected by the feed endpoint.
        String behotTime = String.valueOf(System.currentTimeMillis()).substring(0, 10);
        for (String tab : Arrays.asList("all", "video")) {
            String listUrl = getEncodeURL(String.format(seriesListUrlFormat, URL_DOMAIN, seriesId, tab, generateString(15), tab, tab, behotTime));
            CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(listUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .notFilterRecord()
                    .build();
            String contentType = VIDEO.equalsIgnoreCase(tab) ? VIDEO : ARTICLE;
            HttpRequest listRequest = listRecord.getHttpRequest();
            listRequest.addHeader("authority", String.format("www.%s.com", URL_DOMAIN));
            listRequest.addHeader("referer", String.format("https://www.%s.com/auto/series/%s/%s", URL_DOMAIN, seriesId, contentType));
            listRequest.addHeader("accept", "*/*");
            listRequest.addHeader("user-agent", getRandomUA());
            parsedLinks.add(listRecord);
        }
        return parsedLinks;
    }

    /**
     * Extracts structured crawl results from a downloaded page.
     * <p>
     * Produces, depending on the record's result data types:
     * <ul>
     *   <li>{@code article}: one article data item (title/author/content/images), one Redis
     *       routing item carrying the raw URL payload, and optionally one interaction item
     *       (comments/views/likes).</li>
     *   <li>{@code comment}: one comment item (plus optional interaction item) per entry,
     *       for both top-level comment pages and reply pages.</li>
     * </ul>
     *
     * @param crawlerRequestRecord the record whose page was downloaded
     * @param httpPage             the downloaded page (expected to be JSON)
     * @return the extracted data items; empty when the page body is blank
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> extras = httpRequest.getExtras();
        // articleKey/articleUrl are stashed on the request by the link-parsing stage.
        String articleKey = (String) extras.get("articleKey");
        String articleUrl = (String) extras.get("articleUrl");
        String rawText = httpPage.getRawText();
        if (StringUtils.isBlank(rawText)) {
            logger.error("httpPage is empty !");
            return crawlerDataList;
        }
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            JSONObject pageObj = JSONObject.parseObject(rawText);
            JSONObject dataObj = pageObj.getJSONObject("data");
            JSONObject authorObj = dataObj.getJSONObject("media_user");
            String title = dataObj.getString("title");
            String comments = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("comments");
            String views = dataObj.getString("watch_count");
            String likes = dataObj.getString("digg_count");
            String author = authorObj.getString("screen_name");
            String authorId = authorObj.getString("id");
            String follows = authorObj.getString("follow_count");
            String content = dataObj.getString("content");
            Html html = new Html(content);
            // Concatenate paragraph text, skipping inline JSON placeholder fragments.
            List<String> allContents = html.xpath("//p//text()").all();
            StringBuilder sbContent = new StringBuilder();
            for (String allContent : allContents) {
                if (allContent.startsWith("{") && allContent.endsWith("}")) {
                    continue;
                }
                sbContent.append(allContent);
            }
            // Join image URLs with the literal separator expected downstream.
            List<String> allImages = html.xpath("//img/@src").all();
            StringBuilder sbImage = new StringBuilder();
            for (String allImage : allImages) {
                sbImage.append(allImage).append("\\0x1");
            }
            CrawlerData crawlerAData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(articleUrl)
                    .dataId(StringUtils.joinWith("-", domain(), site, article.enumVal(), articleKey))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Content, sbContent.toString())
                    .addContentKV(Field_Images, sbImage.toString())
                    .addContentKV(Field_Author_Follows, follows)
                    .resultLabelTag(article)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .flowInPipelineTag("kafka_result")
                    .flowInPipelineTag("file")
                    .flowInPipelineTag("console")
                    .build();
            crawlerAData.setFilterPipelineResult(true);
            crawlerAData.tagsCreator().bizTags().addCustomKV("data_type", site);
            // Routing item flowing to Redis; its content is a small JSON payload, not the article.
            // NOTE(review): uses getDomain() here while the other dataIds use domain() — confirm they match.
            CrawlerData crawlerRedisResultData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(articleUrl)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .resultLabelTag(article)
                    .flowInPipelineTag("redis_result")
                    .build();
            JSONObject data = new JSONObject();
            data.put(Field_Urls, httpRequestUrl);
            data.put("releaseTime", crawlerRequestRecord.getReleaseTime());
            data.put("site", site);
            if (crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("keywords")) {
                String keywords = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("keywords");
                // Tag value looks like a JSON array string; take the first quoted token.
                keywords = keywords.split("\"")[1];
                data.put("keyword", keywords);
            }
            crawlerRedisResultData.setCrawlerContent(data.toJSONString());
            crawlerRedisResultData.setFilterPipelineResult(true);
            crawlerDataList.add(crawlerAData);
            crawlerDataList.add(crawlerRedisResultData);
            if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
                CrawlerData crawlerInteractionData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .url(articleUrl)
                        .dataId(StringUtils.joinWith("-", domain(), site, interaction.enumVal(), articleKey))
                        .parentId(StringUtils.joinWith("-", domain(), site, article.enumVal(), articleKey))
                        .releaseTime(crawlerRequestRecord.getReleaseTime())
                        .addContentKV(Field_I_Comments, comments)
                        .addContentKV(Field_I_Views, views)
                        .addContentKV(Field_I_Likes, likes)
                        .resultLabelTag(interaction)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .flowInPipelineTag("kafka_result")
                        .flowInPipelineTag("file")
                        .flowInPipelineTag("console")
                        .build();
                crawlerInteractionData.setFilterPipelineResult(true);
                crawlerDataList.add(crawlerInteractionData);
            }
        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)) {
            JSONObject pageObj = JSONObject.parseObject(rawText);
            // Top-level comment page: entries wrap the comment under a "comment" key.
            if (httpRequestUrl.matches(commentUrlRegex)) {
                JSONArray comments = pageObj.getJSONArray("data");
                for (Object cmt : comments) {
                    JSONObject cmtObj = ((JSONObject) cmt).getJSONObject("comment");
                    String commentId = cmtObj.getString("id_str");
                    String content = cmtObj.getString("text");
                    String author = cmtObj.getString("user_name");
                    String authorId = cmtObj.getString("user_id");
                    String pubTime = cmtObj.getString("create_time");
                    String likes = cmtObj.getString("digg_count");
                    String replies = ((JSONObject) cmt).getString("reply_count");
                    // create_time is epoch seconds; appending "000" converts to milliseconds.
                    Long releaseTime = Long.valueOf(pubTime + "000");
                    CrawlerData crawlerCData = CrawlerData.builder()
                            .data(crawlerRequestRecord, httpPage)
                            .url(articleUrl)
                            .dataId(StringUtils.joinWith("-", domain(), site, comment.enumVal(), commentId))
                            .parentId(StringUtils.joinWith("-", domain(), site, article.enumVal(), articleKey))
                            .releaseTime(releaseTime)
                            .addContentKV(Field_Author, author)
                            .addContentKV(Field_Author_Id, authorId)
                            .addContentKV(Field_Content, content)
                            .resultLabelTag(comment)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .flowInPipelineTag("kafka_result")
                            .flowInPipelineTag("file")
                            .flowInPipelineTag("console")
                            .build();
                    crawlerCData.setFilterPipelineResult(true);
                    crawlerDataList.add(crawlerCData);
                    if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
                        CrawlerData crawlerInteractionData = CrawlerData.builder()
                                .data(crawlerRequestRecord, httpPage)
                                .url(articleUrl)
                                .dataId(StringUtils.joinWith("-", domain(), site, interaction.enumVal(), commentId))
                                .parentId(StringUtils.joinWith("-", domain(), site, comment.enumVal(), commentId))
                                .releaseTime(releaseTime)
                                .addContentKV(Field_I_Comments, replies)
                                .addContentKV(Field_I_Likes, likes)
                                .resultLabelTag(interaction)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                                .flowInPipelineTag("kafka_result")
                                .flowInPipelineTag("file")
                                .flowInPipelineTag("console")
                                .build();
                        crawlerInteractionData.setFilterPipelineResult(true);
                        crawlerDataList.add(crawlerInteractionData);
                    }
                }
            }
            // Reply page: entries are the comments themselves, nested under data.data,
            // with author info under a "user" object and no reply counter.
            if (httpRequestUrl.matches(replyUrlRegex)) {
                JSONArray replies = pageObj.getJSONObject("data").getJSONArray("data");
                for (Object rep : replies) {
                    JSONObject cmtObj = (JSONObject) rep;
                    String commentId = cmtObj.getString("id_str");
                    String content = cmtObj.getString("text");
                    String pubTime = cmtObj.getString("create_time");
                    // create_time is epoch seconds; appending "000" converts to milliseconds.
                    Long releaseTime = Long.valueOf(pubTime + "000");
                    String likes = cmtObj.getString("digg_count");
                    String author = cmtObj.getJSONObject("user").getString("name");
                    String authorId = cmtObj.getJSONObject("user").getString("user_id");
                    CrawlerData crawlerCData = CrawlerData.builder()
                            .data(crawlerRequestRecord, httpPage)
                            .url(articleUrl)
                            .dataId(StringUtils.joinWith("-", domain(), site, comment.enumVal(), commentId))
                            .parentId(StringUtils.joinWith("-", domain(), site, article.enumVal(), articleKey))
                            .releaseTime(releaseTime)
                            .addContentKV(Field_Author, author)
                            .addContentKV(Field_Author_Id, authorId)
                            .addContentKV(Field_Content, content)
                            .resultLabelTag(comment)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .flowInPipelineTag("kafka_result")
                            .flowInPipelineTag("file")
                            .flowInPipelineTag("console")
                            .build();
                    crawlerCData.setFilterPipelineResult(true);
                    crawlerDataList.add(crawlerCData);
                    if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
                        CrawlerData crawlerInteractionData = CrawlerData.builder()
                                .data(crawlerRequestRecord, httpPage)
                                .url(articleUrl)
                                .dataId(StringUtils.joinWith("-", domain(), site, interaction.enumVal(), commentId))
                                .parentId(StringUtils.joinWith("-", domain(), site, comment.enumVal(), commentId))
                                .releaseTime(releaseTime)
                                .addContentKV(Field_I_Likes, likes)
                                .resultLabelTag(interaction)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                                .flowInPipelineTag("kafka_result")
                                .flowInPipelineTag("file")
                                .flowInPipelineTag("console")
                                .build();
                        crawlerInteractionData.setFilterPipelineResult(true);
                        crawlerDataList.add(crawlerInteractionData);
                    }
                }
            }
        }
        return crawlerDataList;
    }

    /**
     * Parses a downloaded comment-list page: schedules the next comment page
     * when the API reports more data, and one reply-list request per comment
     * that has replies.
     *
     * @param crawlerRequestRecord the comment-page request just downloaded
     * @param httpPage             downloaded page (JSON body expected)
     * @param parsedLinks          accumulator for generated follow-up requests
     * @return parsedLinks with any turn-page / reply requests appended
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> urlParams = getUrlParams(httpRequestUrl);
        if (null == urlParams || StringUtils.isBlank(httpPage.getRawText())) {
            logger.error("download comment page failed");
            return parsedLinks;
        }
        String itemId = (String) urlParams.get("id");
        String offsetStr = (String) urlParams.get("offset");
        String count = (String) urlParams.get("count");

        Map<String, Object> extras = httpRequest.getExtras();
        String articleKey = (String) extras.get("articleKey");
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        boolean hasMore = pageObj.getBooleanValue("has_more");
        if (hasMore) {
            // turn the page: next offset = current offset + page size
            int newOffset = Integer.parseInt(offsetStr) + Integer.parseInt(count);
            String commentUrl = String.format(commentUrlFormat, URL_DOMAIN, articleKey, itemId, newOffset);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .notFilterRecord()
                    .build();
            commentRecord.getHttpRequest().setExtras(copyExtras(extras));
            parsedLinks.add(commentRecord);
        }
        JSONArray cmtJSONObjs = pageObj.getJSONArray("data");
        if (null == cmtJSONObjs) {
            // defensive: a response may carry no "data" array at all
            logger.error("comment page has no data array : [{}]", httpRequestUrl);
            return parsedLinks;
        }
        if (cmtJSONObjs.size() > 0) {
            crawlerRequestRecord.setNeedWashPage(true);
        }
        // one reply-list request per comment that actually has replies
        for (Object cmtJSONObj : cmtJSONObjs) {
            JSONObject cmtObj = ((JSONObject) cmtJSONObj).getJSONObject("comment");
            int replyCount = cmtObj.getIntValue("reply_count");
            if (replyCount > 0) {
                String idStr = cmtObj.getString("id_str");
                long releaseTime = cmtObj.getLongValue("create_time") * 1000;
                String replyUrl = String.format(replyUrlFormat, URL_DOMAIN, articleKey, idStr, 0);
                CrawlerRequestRecord replyRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(replyUrl)
                        .releaseTime(releaseTime)
                        .copyBizTags()
                        .notFilterRecord()
                        .resultLabelTag(comment)
                        .resultLabelTag(interaction)
                        .needParsed(false)
                        .needWashed(true)
                        .build();
                // copy the extras so sibling records never share one mutable map
                // (consistent with the turn-page record above)
                replyRecord.getHttpRequest().setExtras(copyExtras(extras));
                parsedLinks.add(replyRecord);
            }
        }
        return parsedLinks;
    }

    /**
     * Parses an item (article/video) detail page. On an incomplete download
     * the request is retried with a fresh user-agent (bounded by the
     * "downloadTimes" tag); on success it schedules internal downloads for
     * the comment count and related car-series data, and optionally a
     * comment-crawl request when the schedule tags ask for comments.
     *
     * @param crawlerRequestRecord the item-page request just downloaded
     * @param httpPage             downloaded item page (JSON body expected)
     * @param parsedLinks          accumulator for generated requests
     * @return parsedLinks with retry / internal-download / comment requests appended
     */
    private List<CrawlerRequestRecord> parseItemLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        crawlerRequestRecord.setNeedWashPage(true);
        // the article key is everything after the last '=' in the request url
        String articleKey = httpRequestUrl.substring(httpRequestUrl.lastIndexOf("=") + 1);
        httpRequest.addExtra("articleKey", articleKey);
        httpRequest.addExtra("articleUrl", String.format("https://www.%s.com/article/%s",URL_DOMAIN,articleKey));
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        JSONObject dataObj = pageObj.getJSONObject("data");
        if (null == dataObj.getJSONObject("media_user")){
            // page came back incomplete: retry the same request with a new random
            // user-agent, bounded by the "downloadTimes" counter in the biz tags
            logger.error("download page is not full ,download agent");
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            CrawlerBusinessTags crawlerBusinessTags = crawlerRequestRecord.tagsCreator().bizTags();
            String downloadTimes = crawlerBusinessTags.getCategoryTag().getKVTagStrVal("downloadTimes");
            if (StringUtils.isBlank(downloadTimes)){
                crawlerBusinessTags.addCustomKV("downloadTimes",1);
            }else {
                int times = Integer.parseInt(downloadTimes);
                crawlerBusinessTags.addCustomKV("downloadTimes",times + 1);
                if (times > 10){
                    logger.error("link download too many times");
                    return parsedLinks;
                }
            }
            httpRequest.addHeader("user-agent",getRandomUA());
            crawlerRequestRecord.setNeedWashPage(false);
            parsedLinks.add(crawlerRequestRecord);
            return parsedLinks;
        }
        // page is complete: reset the retry counter
        crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove("downloadTimes");

        String commentUrl = String.format(commentUrlFormat, URL_DOMAIN, articleKey, articleKey, 0);
        // internal download: comment count for this item
        CrawlerRequestRecord commentsRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .recordKey(commentUrl + "#comments")
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        parsedLinks.add(commentsRecord);
        // internal download: related car-series data for this item
        String seriesInfoUrl = String.format(seriesInfoUrlFormat, URL_DOMAIN, articleKey);
        CrawlerRequestRecord seriesInfoRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(seriesInfoUrl)
                .recordKey(seriesInfoUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        parsedLinks.add(seriesInfoRecord);

        // decide whether comments should be crawled for this item
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
        if (categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null) {
            if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
                logger.error("dongchedi crawler comment need to filter information!");
                return parsedLinks;
            }

            // carry over the filter settings serialized into the biz tag
            KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
            CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .notFilterRecord()
                    .resultLabelTag(comment)
                    .resultLabelTag(interaction)
                    .build();

            commentRecord.setFilter(filterInfoRecord.getFilter());
            commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
            HttpRequest commentRequest = commentRecord.getHttpRequest();
            commentRequest.addExtra("articleKey", articleKey);
            commentRequest.addExtra("articleUrl", httpRequestUrl);
            parsedLinks.add(commentRecord);
        }
        return parsedLinks;
    }

    /**
     * Parses a feed-list JSON page: schedules the next feed page (cursor-based
     * paging) when more data is available, and one item-detail request per
     * feed entry. An empty page is retried with the filter tag removed, up to
     * the bound tracked in the "downloadTimes" business tag.
     *
     * @param crawlerRequestRecord the feed-list request just downloaded
     * @param httpPage             downloaded page (JSON body expected)
     * @param parsedLinks          accumulator for generated requests
     * @return parsedLinks with turn-page and item requests appended
     */
    private List<CrawlerRequestRecord> parseFeedListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String category = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(Tag_Category);
        try {
            JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
            JSONArray feeds = pageObj.getJSONArray("data");
            if (feeds.size() == 0) {
                logger.error("download page has no data , check now");
                // retry the same request without the filter tag, bounded by "downloadTimes"
                crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                CrawlerBusinessTags crawlerBusinessTags = crawlerRequestRecord.tagsCreator().bizTags();
                String downloadTimes = crawlerBusinessTags.getCategoryTag().getKVTagStrVal("downloadTimes");
                if (StringUtils.isBlank(downloadTimes)){
                    crawlerBusinessTags.addCustomKV("downloadTimes",1);
                }else {
                    int times = Integer.parseInt(downloadTimes);
                    crawlerBusinessTags.addCustomKV("downloadTimes",times + 1);
                    if (times > 10){
                        logger.error("link download too many times");
                        return parsedLinks;
                    }
                }
                parsedLinks.add(crawlerRequestRecord);
                return parsedLinks;
            }
            // page succeeded: reset the retry counter
            crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove("downloadTimes");
            JSONObject lastObj = feeds.getJSONObject(feeds.size() - 1);
            // the cursor of the last entry drives the next page;
            // first 10 characters = seconds-resolution timestamp
            String beHotTime = lastObj.getJSONObject("info").getString("cursor");
            beHotTime = beHotTime.substring(0, 10);
            boolean hasMore = pageObj.getBooleanValue("has_more");
            if (hasMore) {
                String feedListUrl = getEncodeURL(String.format(feedListUrlFormat, URL_DOMAIN, load_more, category, beHotTime, category, generateString(15), beHotTime));
                CrawlerRequestRecord feedRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(feedListUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .build();
                HttpRequest feedRequest = feedRecord.getHttpRequest();
                feedRequest.addHeader("authority", String.format("www.%s.com", URL_DOMAIN));
                feedRequest.addHeader("referer", String.format("www.%s.com/feed", URL_DOMAIN));
                feedRequest.addHeader("accept", "*/*");
                feedRequest.addHeader("user-agent", getRandomUA());
                parsedLinks.add(feedRecord);
            }
            for (Object feed : feeds) {
                JSONObject feedObj = (JSONObject) feed;
                String uniqueIdStr = feedObj.getString("unique_id_str");
                String type = feedObj.getString("type");
                JSONObject infoObj = feedObj.getJSONObject("info");
                long releaseTime = infoObj.getLongValue("publish_time") * 1000;
                String info = infoObj.toJSONString();
                String itemUrl = String.format(itemUrlFormat, URL_DOMAIN, uniqueIdStr);
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(releaseTime)
                        .copyBizTags()
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build();
                // media type prefix: 20x = article, 21x = video
                if (type.startsWith("20")) {
                    itemRecord.tagsCreator().bizTags().addSite(ARTICLE);
                } else if (type.startsWith("21")) {
                    itemRecord.tagsCreator().bizTags().addSite(VIDEO);
                } else {
                    logger.error("unknown media type --> {}", type);
                    continue;
                }
                itemRecord.getHttpRequest().addExtra("info", info);
                parsedLinks.add(itemRecord);
            }
        } catch (Exception e) {
            // keep the stack trace; e.getMessage() alone can be null and hides the cause
            logger.error("parse feed list page failed", e);
        }
        return parsedLinks;
    }

    /**
     * Parses a car-series feed-list JSON page: schedules the next page when
     * more data is available, and one item-detail request per feed entry with
     * browser-like headers so the detail pages download fully. An empty page
     * is retried with the filter tag removed, bounded by the "downloadTimes"
     * business tag.
     *
     * @param crawlerRequestRecord the series-list request just downloaded
     * @param httpPage             downloaded page (JSON body expected)
     * @param parsedLinks          accumulator for generated requests
     * @return parsedLinks with turn-page and item requests appended
     */
    private List<CrawlerRequestRecord> parseSeriesListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> urlParams = getUrlParams(httpRequestUrl);
        if (null == urlParams){
            logger.error("series list url can not parse to map");
            return parsedLinks;
        }
        String category = (String) urlParams.get("category");
        String concernId = (String) urlParams.get("concern_id");
        try {
            JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
            JSONArray feeds = pageObj.getJSONArray("data");
            if (feeds.size() == 0) {
                logger.error("download page has no data , check now");
                // retry the same request without the filter tag, bounded by "downloadTimes"
                crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                CrawlerBusinessTags crawlerBusinessTags = crawlerRequestRecord.tagsCreator().bizTags();
                String downloadTimes = crawlerBusinessTags.getCategoryTag().getKVTagStrVal("downloadTimes");
                if (StringUtils.isBlank(downloadTimes)){
                    crawlerBusinessTags.addCustomKV("downloadTimes",1);
                }else {
                    int times = Integer.parseInt(downloadTimes);
                    crawlerBusinessTags.addCustomKV("downloadTimes",times + 1);
                    if (times > 10){
                        logger.error("link download too many times");
                        return parsedLinks;
                    }
                }
                parsedLinks.add(crawlerRequestRecord);
                return parsedLinks;
            }
            // page succeeded: reset the retry counter
            crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove("downloadTimes");
            JSONObject lastObj = feeds.getJSONObject(feeds.size() - 1);
            // the cursor for the next page comes from the last entry's behot_time
            String beHotTime = lastObj.getJSONObject("info").getString("behot_time");
            boolean hasMore = pageObj.getBooleanValue("has_more");
            if (hasMore) {
                String seriesListUrl = getEncodeURL(String.format(seriesListUrlFormat, URL_DOMAIN, concernId, category,  generateString(15), category, category,  beHotTime));
                CrawlerRequestRecord feedRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(seriesListUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .build();
                HttpRequest feedRequest = feedRecord.getHttpRequest();
                feedRequest.addHeader("authority", String.format("www.%s.com", URL_DOMAIN));
                String type = ARTICLE;
                if (category.equalsIgnoreCase(VIDEO)){
                    type = VIDEO;
                }
                feedRequest.addHeader("referer", String.format("https://www.%s.com/auto/series/%s/%s", URL_DOMAIN, concernId,type));
                feedRequest.addHeader("accept", "*/*");
                feedRequest.addHeader("user-agent", getRandomUA());
                parsedLinks.add(feedRecord);
            }
            for (Object feed : feeds) {
                JSONObject feedObj = (JSONObject) feed;
                String uniqueIdStr = feedObj.getString("unique_id_str");
                String type = feedObj.getString("type");
                JSONObject infoObj = feedObj.getJSONObject("info");
                long releaseTime = infoObj.getLongValue("publish_time") * 1000;
                String info = infoObj.toJSONString();
                String itemUrl = String.format(itemUrlFormat, URL_DOMAIN, uniqueIdStr);
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(releaseTime)
                        .copyBizTags()
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build();
                // media type prefix: 20x = article, 21x / 23x = video
                if (type.startsWith("20")) {
                    itemRecord.tagsCreator().bizTags().addSite(ARTICLE);
                } else if (type.startsWith("21") || type.startsWith("23")) {
                    itemRecord.tagsCreator().bizTags().addSite(VIDEO);
                } else {
                    logger.error("unknown media type --> {}", type);
                    continue;
                }
                HttpRequest itemRequest = itemRecord.getHttpRequest();
                itemRequest.addExtra("info", info);
                // browser-like headers so the item page downloads completely
                itemRequest.addHeader("authority", String.format("www.%s.com", URL_DOMAIN));
                itemRequest.addHeader("Host", String.format("www.%s.com", URL_DOMAIN));
                itemRequest.addHeader("accept","text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9");
                itemRequest.addHeader("cache-control","max-age=0");
                itemRequest.addHeader("sec-ch-ua-mobile","?0");
                itemRequest.addHeader("sec-ch-ua","\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"90\", \"Google Chrome\";v=\"90\"");
                itemRequest.addHeader("upgrade-insecure-requests","1");
                itemRequest.addHeader("sec-fetch-site","same-origin");
                itemRequest.addHeader("sec-fetch-mode","navigate");
                itemRequest.addHeader("sec-fetch-user","?1");
                itemRequest.addHeader("sec-fetch-dest","document");
                itemRequest.addHeader("accept-language","zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7");
                itemRequest.addHeader("referer","https://www.dongchedi.com/");
                itemRequest.addHeader("user-agent",getRandomUA());
                parsedLinks.add(itemRecord);
            }
        } catch (Exception e) {
            // keep the stack trace; e.getMessage() alone can be null and hides the cause
            logger.error("parse series list page failed", e);
        }
        return parsedLinks;
    }

    /**
     * Ad-hoc scratch test: prints a random numeric string and a
     * seconds-to-milliseconds conversion for manual inspection.
     */
    @Test
    public void test() {
        final String randomSample = generateString(15);
        System.out.println(randomSample);
        final long millis = 1618393501L * 1000;
        System.out.println(millis);
    }

    /**
     * Generates a random numeric string (digits 0-9 only).
     * The previous javadoc claimed letters were included, but the character
     * pool has always been digits only.
     *
     * @param length number of characters to generate (0 yields "")
     * @return a random string of exactly {@code length} digit characters
     */
    public static String generateString(int length) {
        final String allChar = "0123456789";
        // StringBuilder: no synchronization needed for a local buffer
        StringBuilder sb = new StringBuilder(length);
        Random random = new Random();
        for (int i = 0; i < length; i++) {
            sb.append(allChar.charAt(random.nextInt(allChar.length())));
        }
        return sb.toString();
    }

    /**
     * Parses the common index (feed) page: generates one paged feed-list
     * request per category tab found in the page's injected JSON.
     *
     * @param crawlerRequestRecord the index-page request just downloaded
     * @param httpPage             downloaded index page
     * @param parsedLinks          accumulator for generated requests
     * @return parsedLinks with one feed-list request per category appended
     */
    private List<CrawlerRequestRecord> parseCommonIndexLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        // paged list: current time truncated to 10 digits (seconds) seeds the first cursor
        String curTimes = String.valueOf(System.currentTimeMillis());
        String times = curTimes.substring(0, 10);
        // category tabs injected into the page source (__NEXT_DATA__ script tag)
        JSONArray categories = getCategories(httpPage);
        if (categories.size() == 0) {
            logger.error("download index page error");
        }
        for (Object category : categories) {
            JSONObject cateObj = (JSONObject) category;
            String text = cateObj.getString("label");
            String cate = cateObj.getString("id");
            String feedListUrl = getEncodeURL(String.format(feedListUrlFormat, URL_DOMAIN, enter_auto, cate, times, cate,generateString(15) , times));
            CrawlerRequestRecord feedRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(feedListUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .notFilterRecord()
                    .build();
            HttpRequest feedRequest = feedRecord.getHttpRequest();
            feedRequest.addHeader("authority", String.format("www.%s.com", URL_DOMAIN));
            feedRequest.addHeader("referer", String.format("www.%s.com/feed", URL_DOMAIN));
            feedRequest.addHeader("accept", "*/*");
            feedRequest.addHeader("user-agent", getRandomUA());
            List<String> path = new ArrayList<>();
            path.add(text);
            // human-readable tab label becomes the crawl path;
            // the tab id becomes the category tag used for later paging
            feedRecord.tagsCreator().bizTags().addCustomKV(Field_Path, path);
            feedRecord.tagsCreator().bizTags().addCustomKV(Tag_Category, cate);
            parsedLinks.add(feedRecord);
        }
        return parsedLinks;
    }

    /**
     * Extracts the feed category tabs from the index page's injected
     * __NEXT_DATA__ JSON; falls back to a hard-coded tab list when the page
     * carries no feedTabData node.
     *
     * @param httpPage downloaded index page
     * @return array of {id, label} category objects; empty array on parse failure
     */
    private JSONArray getCategories(HttpPage httpPage) {
        Html html = httpPage.getHtml();
        try {
            String dataText = html.xpath("//script[@id=\"__NEXT_DATA__\"]/text()").get();
            JSONObject dataObj = JSONObject.parseObject(dataText);
            JSONArray array = dataObj.getJSONObject("props").getJSONObject("pageProps").getJSONArray("feedTabData");
            if (null == array){
                // known default tab set, used when the page omits feedTabData
                array = JSONArray.parseArray("[{\"id\":\"motor_car\",\"label\":\"推荐\"},{\"id\":\"motor_car_new_arrival\",\"label\":\"新车\"},{\"id\":\"motor_local\",\"label\":\"本地\"},{\"id\":\"motor_car_new_guide\",\"label\":\"导购\"},{\"id\":\"motor_car_suv\",\"label\":\"SUV\"},{\"id\":\"motor_industry\",\"label\":\"行业\"},{\"id\":\"motor_car_new_energy\",\"label\":\"新能源\"},{\"id\":\"motor_car_motorcycle\",\"label\":\"摩托车\"},{\"id\":\"motor_car_independent_brand\",\"label\":\"中国品牌\"},{\"id\":\"motor_car_use\",\"label\":\"用车\"},{\"id\":\"motor_car_evaluating\",\"label\":\"试车\"},{\"id\":\"motor_car_trail_diary\",\"label\":\"提车作业\"},{\"id\":\"motor_car_travel\",\"label\":\"自驾游\"},{\"id\":\"motor_car_offroad\",\"label\":\"越野\"},{\"id\":\"motor_car_modification\",\"label\":\"改装\"}]");
            }
            return array;
        } catch (Exception e) {
            // previous code passed the description as a placeholder argument to a
            // message with no placeholder, losing both it and the stack trace
            logger.error("feed page cannot find feed tab data", e);
            return new JSONArray();
        }
    }

    /**
     * Merges data from internally-downloaded sub-pages (comment count and
     * related car-series list) into the main record's business tags. When any
     * internal download fails, the main record is re-queued unwashed, with
     * the filter tag removed, so it can be retried.
     *
     * @param crawlerRecord           the item record that owns the internal downloads
     * @param internalDownloadRecords the internal sub-requests (comments / series info)
     * @param links                   output list the record is re-added to on failure
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            try{
                HttpPage internalDownloadPage = internalDownloadRecord.getInternalDownloadPage();
                HttpRequest downloadRecordHttpRequest = internalDownloadRecord.getHttpRequest();
                String recordHttpRequestUrl = downloadRecordHttpRequest.getUrl();
                if (!internalDownloadPage.isDownloadSuccess()){
                    throw new Exception("internal download failed : ["+ recordHttpRequestUrl +"]");
                }
                if (recordHttpRequestUrl.matches(commentUrlRegex)){
                    // comment-count endpoint: stash total_number on the main record
                    JSONObject pageObj = JSONObject.parseObject(internalDownloadPage.getRawText());
                    String comments = pageObj.getString("total_number");
                    crawlerRecord.tagsCreator().bizTags().addCustomKV("comments",comments);
                }
                if (recordHttpRequestUrl.matches(seriesInfoUrlRegex)){
                    // series endpoint: collect {name, id, url} per related car series
                    JSONObject pageObj = JSONObject.parseObject(internalDownloadPage.getRawText());
                    JSONArray cars = pageObj.getJSONArray("data");
                    List<Map<String, String>> series = new ArrayList<>();
                    for (Object car : cars) {
                        JSONObject carObj = (JSONObject)car;
                        Map<String, String> map = new HashMap<>();
                        String seriesName = carObj.getString("series_name");
                        String seriesId = carObj.getString("series_id");
                        String seriesUrl = String.format("https://www.dongchedi.com/auto/series/%s", seriesId);
                        map.put("series_name", seriesName);
                        map.put("series_id", seriesId);
                        map.put("series_url", seriesUrl);
                        series.add(map);
                    }
                    crawlerRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Series, series);
                }

            }catch (Exception e){
                // keep the stack trace; e.getMessage() alone can be null and hides the cause
                logger.error("internal download post-processing failed", e);
                crawlerRecord.setNeedWashPage(false);
                crawlerRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                links.add(crawlerRecord);
                return;
            }
        }
    }

    /**
     * Registers every URL pattern this script is willing to handle.
     */
    @Override
    public void initUrlRegulars() {
        for (String regular : Arrays.asList(
                IndexUrl,
                keysRegex,
                commonIndexUrlRegex,
                commonIndexUrlCmtRegex,
                feedListUrlRegex,
                itemUrlRegex,
                commentUrlRegex,
                replyUrlRegex,
                seriesListUrlRegex,
                indexUrlRegex)) {
            addUrlRegular(regular);
        }
    }

    /**
     * Checks whether the record belongs to a site this script handles.
     * Null-safe: a record without a "site" tag is now rejected instead of
     * throwing a NullPointerException (the old code dereferenced the tag
     * value directly).
     *
     * @param crawlerRequestRecord record to check
     * @return true when the record's site is article, video, or the script's own site
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        return ARTICLE.equalsIgnoreCase(crawlerSite)
                || VIDEO.equalsIgnoreCase(crawlerSite)
                || (crawlerSite != null && crawlerSite.equalsIgnoreCase(scriptSite));
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // Intentionally a no-op: this script needs no post-execution work.
    }

    /**
     * @return the crawler domain identifier for this script ("dcdapp")
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Validates that a page download completed and returned content.
     *
     * @param crawlerRequestRecord last record
     * @param httpPage             page
     * @return true when the page is broken (bad status code, failed download,
     *         or empty body); false when the page looks usable
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        final String lastRequestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        final int statusCode = httpPage.getStatusCode();
        if (statusCode != 200) {
            logger.error("download page {} error, status code is {}", lastRequestUrl, statusCode);
            return true;
        }
        if (!httpPage.isDownloadSuccess()) {
            logger.error("download page failed, check your link {}", lastRequestUrl);
            return true;
        }
        if (StringUtils.isBlank(httpPage.getRawText())) {
            logger.error("download page empty, check your link {}", lastRequestUrl);
            return true;
        }
        return false;
    }

    /**
     * Returns a shallow copy of the given extras map so that sibling request
     * records never share one mutable map instance.
     *
     * @param inExtras source map (must not be null)
     * @return a new HashMap containing the same entries
     */
    public static Map<String, Object> copyExtras(Map<String, Object> inExtras) {
        // HashMap's copy constructor replaces the manual entry-by-entry loop
        return new HashMap<>(inExtras);
    }

    /**
     * Parses the query string of a URL into a map.
     * Fixes over the previous version: a trailing "?" no longer throws
     * ArrayIndexOutOfBoundsException; values containing '=' (e.g. base64)
     * are kept intact; insertion order is preserved (LinkedHashMap) so a
     * re-assembled URL keeps its original parameter order.
     *
     * @param url http://*.*.com?aa=11&bb=22&cc=33
     * @return parameter map, or null when the URL carries no query string
     */
    public static Map<String, Object> getUrlParams(String url) {
        int qIdx = url.indexOf('?');
        if (qIdx < 0) {
            return null;
        }
        // everything after the first '?' is the query string
        String param = url.substring(qIdx + 1);
        if (param.trim().isEmpty()) {
            return null;
        }
        Map<String, Object> map = new LinkedHashMap<>();
        for (String s : param.split("&")) {
            // limit 2 keeps '=' characters inside the value intact
            String[] p = s.split("=", 2);
            // skip malformed pairs and empty values, as before
            if (p.length == 2 && !p[1].isEmpty()) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

    /**
     * Joins a parameter map into a URL-encoded query string ("k1=v1&k2=v2").
     * Blank values are skipped; an empty result now yields "" instead of the
     * StringIndexOutOfBoundsException the old substring(1) produced.
     *
     * @param source url parameter map (values are Strings)
     * @return joined, URL-encoded parameter string without a leading '&'
     */
    public static String asUrlParams(Map<String, Object> source) {
        StringBuilder paramStr = new StringBuilder();
        for (Map.Entry<String, Object> entry : source.entrySet()) {
            String value = (String) entry.getValue();
            if (value == null || value.trim().isEmpty()) {
                continue;
            }
            try {
                // URL-encode each value
                value = URLEncoder.encode(value, "utf-8");
            } catch (UnsupportedEncodingException e) {
                // UTF-8 support is mandated by the JVM spec, so this cannot happen
                throw new IllegalStateException("utf-8 encoding unsupported", e);
            }
            if (paramStr.length() > 0) {
                paramStr.append("&");
            }
            paramStr.append(entry.getKey()).append("=").append(value);
        }
        return paramStr.toString();
    }

    /**
     * Re-assembles a URL with its query-string values URL-encoded.
     *
     * @param url url
     * @return the URL with encoded parameters, or the input unchanged when it
     *         has no parsable query string
     */
    public static String getEncodeURL(String url) {
        int qIdx = url.indexOf('?');
        if (qIdx < 0) {
            return url;
        }
        Map<String, Object> urlParams = getUrlParams(url);
        if (null == urlParams) {
            return url;
        }
        // base part up to and including the '?', then the re-encoded params
        return url.substring(0, qIdx + 1) + asUrlParams(urlParams);
    }

    // Pool of desktop browser user-agent strings used for UA rotation.
    // final: the reference is never reassigned; contents are populated once below.
    private static final List<String> agentList = new ArrayList<>();

    static {
        agentList.add("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko");
        agentList.add("Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)");
        agentList.add("Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.28.3 (KHTML, like Gecko) Version/3.2.3 ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/525.28.3");
        agentList.add("Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16");
        agentList.add("Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14");
        agentList.add("Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14");
        agentList.add("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14");
        agentList.add("Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02");
        agentList.add("Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0");
        agentList.add("Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.13 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400");
    }

    /**
     * Narrows an untyped {@code Object} to a {@code Map<T, T>} by casting every key and
     * value to {@code clazz}.
     *
     * @param obj   candidate object; anything other than a {@link Map} yields {@code null}
     * @param clazz runtime type both keys and values are cast to
     * @return a new {@link HashMap} with the cast entries, or {@code null} when {@code obj}
     *         is not a {@code Map} (callers rely on the null contract — do not change to empty)
     * @throws ClassCastException if any key or value is not an instance of {@code clazz}
     */
    public static <T> Map<T, T> castMap(Object obj, Class<T> clazz) {
        if (obj instanceof Map<?, ?>) {
            Map<T, T> result = new HashMap<>();
            for (Map.Entry<?, ?> entry : ((Map<?, ?>) obj).entrySet()) {
                result.put(clazz.cast(entry.getKey()), clazz.cast(entry.getValue()));
            }
            return result;
        }
        return null;
    }

    /**
     * Picks a random User-Agent string from {@code agentList} for request rotation.
     *
     * <p>Note: commons-lang3 {@code RandomUtils.nextInt(start, end)} is exclusive of
     * {@code end}, so the bound must be {@code agentList.size()} — the previous
     * {@code size() - 1} bound could never select the last entry in the list.
     *
     * @return a randomly chosen user-agent string
     */
    private static String getRandomUA() {
        return agentList.get(RandomUtils.nextInt(0, agentList.size()));
    }

    /**
     * Repeatedly HTML-unescapes a string to resolve multiply-escaped entities
     * (e.g. {@code "&amp;amp;lt;"} → {@code "&amp;lt;"} → {@code "<"}), capped at 6 passes.
     *
     * <p>Stops early once a pass produces no change, so plain text that merely contains
     * a literal {@code '&'} and {@code ';'} is not re-processed needlessly.
     *
     * @param str possibly-escaped input; {@code null} is returned as-is
     * @return the fully unescaped string (up to 6 unescape passes)
     */
    public static String unescapeHtml2J(String str) {
        if (str == null) {
            return null; // previously threw NPE on str.contains(...)
        }
        int times = 0;
        while (str.contains("&") && str.contains(";")) {
            String unescaped = StringEscapeUtils.unescapeHtml(str);
            if (unescaped.equals(str)) {
                break; // fixed point reached — nothing left to unescape
            }
            str = unescaped;
            if (++times > 5) {
                break; // safety cap against pathological nesting
            }
        }
        return str;
    }
}
