package com.chance.cc.crawler.development.scripts.dcdapp.praise;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/3/1 11:41
 * @Description 懂车帝 口碑采集
 **/
public class DcdappPraiseCrawlerScript extends CrawlerCommonScript {

    private Logger log = LoggerFactory.getLogger(DcdappPraiseCrawlerScript.class);
    // --- crawler identity ---
    private static final String DOMAIN = "dcdapp";
    private static final String SITE = "praise";
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";//dcdapp_request_again
    // --- tag / key names carried on records ---
    private static final String KEYWORDS = "keywords";
    private static final String DOMAIN_NAME = "domainName";
    private static final String COMMENT_FILTER_INFO = "comment_filter_info";
    private static final String DOMAIN_RESULT_JSON_RECORD_TAG = "domain_result_json";// result field of the initial record (redis-cached json)
    // --- pipeline routing tags ---
    private static final String KAFKA = "kafka";
    private static final String REDIS = "redis";

    // --- url templates (…_SOURCE_URL = String.format templates) and matching regexes ---
    private static final String SERIES_ENTRANCE_URL = "https://www.dcdapp.com/";
    private static final String SERIES_SOURCE_URL = "https://www.%s.com/auto/series/%s";
    private static final String SERIES_AUTO_URL = "https://www.(dongchedi|dcdapp).com/auto/series/\\d+";

    private static final String PRAISE_JSON_SOURCE_URL = "https://www.%s.com/motor/koubei_api/koubei_list_v2?aid=1839" +
            "&motor_feed_extra_params=%s&sort_type=2&tt_from=%s&count=10" +
            "&series_id=%s&tag_id=1&car_id=0&impression_info=%s&loc_mode=1" +
            "&refer=2&tag_name=%s&max_cursor=%s&version_code=6.1.5&device_platform=iphone";// JSON url sorted by "newest"; the page itself uses the "recommended" JSON url
    private static final String PRAISE_JSON_SOURCE_URL1 = "https://www.dongchedi.com/motor/pc/car/series/get_review_list?" +
            "series_id=%s&count=15&page=%s&sort_by=create_time&only_owner=0&part_id=S0&tab=";
    private static final String PRAISE_JSON_SOURCE_URL2 = "https://www.dongchedi.com/motor/pc/car/series/get_review_list\\S*";

    private static final String PRAISE_JSON_URL = "https://www.(dongchedi|dcdapp).com/motor/koubei_api/koubei_list_v2\\S*tag_name=%E6%9C%80%E6%96%B0\\S*";
    private static final String PRAISE_ITEM_SOURCE_URL = "https://www.%s.com/koubei/%s";
    private static final String PRAISE_ITEM_URL = "https://www.(dongchedi|dcdapp).com/koubei/\\d+";

    private static final String USER_SOURCE_URL = "https://www.%s.com/user/%s";
    private static final String USER_URL = "https://www.(dongchedi|dcdapp).com/user/\\d+";
    private static final String COMMENT_SOURCE_URL = "https://www.%s.com/motor/pc/ugc/detail/comment_list?group_id=%s&count=20&cursor=%s";
    private static final String COMMENT_URL = "https://www.(dongchedi|dcdapp).com/motor/pc/ugc/detail/comment_list\\S*";
    private static final String COMMENT_REPLY_SOURCE_URL = "https://www.%s.com/motor/pc/ugc/detail/reply_list?count=10&comment_id=%s&cursor=%s&group_id=%s";
    private static final String COMMENT_REPLY_URL = "https://www.(dongchedi|dcdapp).com/motor/pc/ugc/detail/reply_list\\S*";

    /**
     * Domain identifier used to bind this script to its crawl configuration.
     *
     * @return the constant domain name {@code dcdapp}
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers the URL patterns that route downloaded pages into this script.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {
                SERIES_ENTRANCE_URL,
                SERIES_AUTO_URL,
                PRAISE_JSON_URL,
                PRAISE_ITEM_URL,
                PRAISE_JSON_SOURCE_URL1,
                PRAISE_JSON_SOURCE_URL2,
                COMMENT_URL,
                COMMENT_REPLY_URL
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input gate: only records whose business "site" tag equals {@code praise}
     * are accepted by this script.
     *
     * @param crawlerRequestRecord incoming request record
     * @return true when the record belongs to this site
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return SITE.equals(crawlerRequestRecord.tagsCreator().bizTags().site());
    }

    /**
     * Pre-request hook.  Pulls keyword support data into the record and, when
     * an entrance-url record re-enters carrying the cached redis result tag
     * ({@code domain_result_json}), rebuilds the original item request from it.
     * Items whose release time falls outside the crawl date range are dropped.
     *
     * @param requestRecord        the record about to be requested
     * @param supportSourceRecords auxiliary records (e.g. keyword lists)
     * @return extra records to crawl; empty when nothing qualifies
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> crawlerRecords = new ArrayList<>();
        String requestUrl = requestRecord.getHttpRequest().getUrl();

        // support records whose url contains "keys" carry the keyword list
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (url.contains("keys")) {
                initKeyword(requestRecord, supportSourceRecord);
            }
        }

        if (requestUrl.matches(SERIES_ENTRANCE_URL) && requestRecord.tagsCreator().bizTags().hasKVTag(DOMAIN_RESULT_JSON_RECORD_TAG)) {
            KVTag domainResultJson = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(DOMAIN_RESULT_JSON_RECORD_TAG);
            CrawlerDomainUrls crawlerDomainUrls = JSON.parseObject(String.valueOf(domainResultJson.getVal()), CrawlerDomainUrls.class);
            String url = crawlerDomainUrls.getUrl();
            // the cached value is itself a JSON blob holding itemUrl + releaseTimeToLong
            // (written by washJsonUrlArticleList into the redis pipeline)
            Json urlJson = new Json(url);
            String itemUrl = urlJson.jsonPath($_type + ".itemUrl").get();
            long releaseTimeToLong = Long.parseLong(urlJson.jsonPath($_type + ".releaseTimeToLong").get());
            if (!isDateRange(requestRecord, releaseTimeToLong)) {
                return crawlerRecords;
            }
            // consume the tag so the rebuilt request is not re-expanded next time
            requestRecord.tagsCreator().bizTags().getCategoryTag().removeLabelTag(DOMAIN_RESULT_JSON_RECORD_TAG);
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(requestRecord)
                    .httpUrl(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            crawlerRecords.add(record);
        }

        return crawlerRecords;
    }

    /**
     * Link-extraction dispatcher: routes each downloaded page to the parser
     * matching its URL pattern and returns any follow-up requests discovered.
     *
     * @param crawlerRequestRecord record of the request that produced the page
     * @param httpPage             the downloaded page
     * @return follow-up request records
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> links = new ArrayList<>();
        String url = crawlerRequestRecord.getHttpRequest().getUrl();

        // failed downloads are retried unless the page is permanently gone (404)
        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() != 200) {
            log.error("download page url [{}] error!status is [{}]", url, httpPage.getStatusCode());
            if (httpPage.getStatusCode() != 404) {
                requestAgainCrawlerRecord(links, crawlerRequestRecord);
            }
            crawlerRequestRecord.setNeedWashPage(false);
            return links;
        }

        if (url.matches(SERIES_ENTRANCE_URL)) {
            seriesEntranceUrl(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(SERIES_AUTO_URL)) {
            seriesAutoUrl(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(PRAISE_JSON_SOURCE_URL2)) {
            jsonUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(PRAISE_ITEM_URL)) {
            itemUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(COMMENT_URL)) {
            commentUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(COMMENT_REPLY_URL)) {
            commentReplyUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        return links;
    }

    /**
     * Entry page handler: resolves the active domain (dongchedi/dcdapp) from
     * the page text, then fans out one series request per configured keyword.
     */
    private void seriesEntranceUrl(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        List keywords = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagObjVal(KEYWORDS, List.class);
        crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(KEYWORDS);

        String rawText = httpPage.getRawText();
        String host = getString("懂车帝 www.\\S*.com", rawText);
        if (StringUtils.isBlank(host)) {
            // fall back to the literal host when the branded match fails
            host = getString("www.dongchedi.com", rawText);
        }
        if (StringUtils.isBlank(host)) {
            log.error("can not get domain!");
            return;
        }

        // keep only the middle label, e.g. "www.dongchedi.com" -> "dongchedi"
        String siteDomain = host.substring(host.indexOf(".") + 1, host.lastIndexOf("."));
        for (Object keyword : keywords) {
            CrawlerRequestRecord seriesRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(String.format(SERIES_SOURCE_URL, siteDomain, keyword.toString()))
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            seriesRecord.tagsCreator().bizTags().addCustomKV(DOMAIN_NAME, siteDomain);
            parsedLinks.add(seriesRecord);
        }
    }

    /**
     * Series page handler: records brand/series info into biz tags and
     * schedules the first praise-list JSON request for the series.
     */
    private void seriesAutoUrl(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        String domainName = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(DOMAIN_NAME);
        String brand = httpPage.getHtml().xpath("//*[@id=\"__next\"]/div/div[2]/div[1]/p/span[3]/a").get();
        String seriesName = httpPage.getHtml().xpath("//*[@id=\"__next\"]/div/div[2]/div[1]/p/span[4]/text()").get();
        // series id is the trailing path segment of .../auto/series/<id>
        String seriesId = requestUrl.substring(requestUrl.lastIndexOf("/") + 1);
        if (StringUtils.isBlank(brand) || StringUtils.isBlank(seriesName)) {
            log.error("brand or seriesName is null !");
            return;
        }

        List<Map<String, String>> seriesList = new ArrayList<>();
        Map<String, String> series = new HashMap<>();
        series.put("series_id", seriesId);
        series.put("series_name", seriesName);
        series.put("series_url", requestUrl);
        seriesList.add(series);
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Series, seriesList);
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Brand, brand);

        try {
            CrawlerRequestRecord praiseJsonRecord = getPraiseJsonRecord(crawlerRequestRecord, httpPage, domainName, seriesId);
            praiseJsonRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            parsedLinks.add(praiseJsonRecord);
        } catch (Exception e) {
            // attach the cause so build failures are diagnosable from the log
            log.error("praise url can not get! series url is [{}]", requestUrl, e);
        }
    }

    /**
     * Praise-list JSON page handler.  Pages forward while {@code has_more} is
     * true, emits user records for review authors, and — when comment data is
     * requested — fans out one comment-list request per review whose release
     * time is inside the configured date range.
     */
    private void jsonUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        List<String> all = new ArrayList<>();
        boolean hasMore = false;
        try {
            all = httpPage.getJson().jsonPath($_type + ".data.review_list").all();
            String data = httpPage.getJson().jsonPath($_type + ".data").get();
            JSONObject jsonObject = JSONObject.parseObject(data);
            // constant-first equals: has_more may be absent from a malformed payload
            hasMore = "true".equals(jsonObject.getString("has_more"));
        } catch (Exception e) {
            log.error("json url page download is error!page [{}] will retry", requestUrl, e);
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        // record the source path for downstream washing
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(Field_Path, requestUrl);
        String domainName = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(DOMAIN_NAME);

        Map<String, String> mapFormUrl = getMapFormUrl(requestUrl);
        try {
            if (hasMore) {
                String seriesId = mapFormUrl.get("series_id");
                crawlerRequestRecord.tagsCreator().bizTags().addCustomKV("mapFormUrl", mapFormUrl);
                CrawlerRequestRecord praiseJsonRecord = getPraiseJsonRecord(crawlerRequestRecord, httpPage, domainName, seriesId);
                parsedLinks.add(praiseJsonRecord);
            } else {
                log.error("url [{}] is has not more!", requestUrl);
            }

            // user records for review authors (fan counts fetched later)
            if (!all.isEmpty()) {
                userRecordList(crawlerRequestRecord, domainName, parsedLinks, all, "info.user_info.user_id");
            }
            this.getArticleRecord(crawlerRequestRecord, httpPage, parsedLinks, all);// build article records
            // schedule per-review comment requests when comment data is requested
            CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
            if (resultTags.hasDataType(comment)) {
                resultTags.getCategoryTag().removeLabelTag(comment.enumVal());
                CrawlerRequestRecord commentFilter = resultTags.getCategoryTag().getKVTagObjVal(COMMENT_FILTER_INFO, CrawlerRequestRecord.class);
                if (commentFilter == null) {
                    log.error("comment filter info can not is null!");
                    return;
                }
                if (!all.isEmpty()) {
                    boolean isWash = false;// primitive instead of boxed Boolean
                    for (String data : all) {
                        JSONObject jsonObject = JSONObject.parseObject(data);
                        String releaseTime = jsonObject.getString("create_time");
                        // create_time is epoch seconds; append "000" for milliseconds
                        long releaseTimeToLong = Long.parseLong(releaseTime + "000");
                        String groupId = jsonObject.getString("gid_str");
                        CrawlerRequestRecord commentRecord = getCommentRecord(crawlerRequestRecord, domainName, "0", groupId);
                        if (!isDateRange(commentRecord, releaseTimeToLong)) {
                            continue;
                        }
                        isWash = true;
                        commentRecord.setFilter(commentFilter.getFilter());
                        commentRecord.setFilterInfos(commentFilter.getFilterInfos());
                        commentRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                        parsedLinks.add(commentRecord);
                    }
                    // nothing inside the date range: drop the page and all scheduled links
                    if (!isWash) {
                        crawlerRequestRecord.setNeedWashPage(false);
                        parsedLinks.clear();
                    }
                } else {
                    log.error("json url [{}] page get item size is zero!", requestUrl);
                    crawlerRequestRecord.setNeedWashPage(false);
                    parsedLinks.clear();
                }
            }

        } catch (Exception e) {
            log.error("praise next url [{}] get is error!", requestUrl, e);
            crawlerRequestRecord.setNeedWashPage(false);
        }
    }
    // Stub: intended to build article records from the review list.
    // NOTE(review): body is empty — article output is currently produced by
    // washJsonUrlArticleList instead; confirm whether this is dead code.
    private void getArticleRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, List<String> all) {


    }

    /**
     * Praise item (koubei detail) page handler: validates the embedded script
     * JSON, then schedules the comment-list request and the author's user
     * record.
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        String result;
        try {
            result = httpPage.getHtml().xpath("//script[@crossorigin=\"anonymous\"]").get();
            if (StringUtils.isBlank(result)) {
                log.error("item url [{}] download error!", requestUrl);
                requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
                crawlerRequestRecord.setNeedWashPage(false);
                return;
            }
            // probe the embedded article JSON; throws when the page did not fully render
            new Json(result).jsonPath($_type + ".props.pageProps.article").get();
        } catch (Exception e) {
            // fixed log wording ("can load finish" -> "can not load finish") and attach the cause
            log.error("article url [{}] can not load finish!detail is [{}]", requestUrl, httpPage.getRawText(), e);
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        // canonical link carries the real host, e.g. www.dongchedi.com -> "dongchedi"
        String trueUrl = httpPage.getHtml().xpath("//link[@rel=\"canonical\"]/@href").get();
        String domainName = trueUrl.substring(trueUrl.indexOf(".") + 1, trueUrl.lastIndexOf("."));
        String groupId = requestUrl.substring(requestUrl.lastIndexOf("/") + 1);
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(DOMAIN_NAME, domainName);

        // schedule the comment-list request when comment data is requested
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (resultTags.hasDataType(comment)) {
            resultTags.getCategoryTag().removeLabelTag(comment.enumVal());
            CrawlerRequestRecord commentFilter = resultTags.getCategoryTag().getKVTagObjVal(COMMENT_FILTER_INFO, CrawlerRequestRecord.class);
            if (commentFilter == null) {
                log.error("comment filter info can not is null!");
                return;
            }

            CrawlerRequestRecord commentRecord = getCommentRecord(crawlerRequestRecord, domainName, "0", groupId);
            commentRecord.setFilter(commentFilter.getFilter());
            commentRecord.setFilterInfos(commentFilter.getFilterInfos());
            commentRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            parsedLinks.add(commentRecord);
        }

        // author's user record — reuse the script JSON extracted above instead of re-running the xpath
        String userId = new Json(result).jsonPath($_type + ".props.pageProps.article.motor_profile_info.user_id").get();
        CrawlerRequestRecord userRecord = getUserRecord(crawlerRequestRecord, domainName, userId);
        parsedLinks.add(userRecord);
    }

    /**
     * Comment-list JSON handler: pages through comments, schedules reply-list
     * requests for comments whose replies are not fully inlined, and emits
     * user records for authors (so fan counts can be fetched later).
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        String domainName = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(DOMAIN_NAME);
        String cursor = "";
        // verify the page downloaded correctly; retry otherwise
        try {
            cursor = httpPage.getJson().jsonPath($_type + ".data.cursor").get();
        } catch (Exception e) {
            log.error("comment url download is error!page [{}] is will retry", crawlerRequestRecord.getHttpRequest().getUrl());
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        // comment pagination
        Map<String, String> mapFormUrl = getMapFormUrl(requestUrl);
        String group_id = mapFormUrl.get("group_id");
        if ("true".equals(httpPage.getJson().jsonPath($_type + ".data.has_more").get())) {
            CrawlerRequestRecord commentRecord = getCommentRecord(crawlerRequestRecord, domainName, cursor, group_id);
            parsedLinks.add(commentRecord);
        }

        // replies of each comment
        List<String> all = httpPage.getJson().jsonPath($_type + ".data.comment_data").all();
        for (String data : all) {
            JSONObject commentJson = JSONObject.parseObject(data);
            int reply_count = commentJson.getIntValue("reply_count");
            int count = commentJson.getJSONObject("reply_data").getIntValue("count");
            // replies fully inlined in reply_list -> only user records needed
            if(count >= reply_count){
                if(count == 0){
                    continue;
                }
                if(commentJson.getJSONObject("reply_data").getJSONArray("reply_list") != null){
                    List<String> list = commentJson.getJSONObject("reply_data").getJSONArray("reply_list").toJavaList(String.class);
                    if (list.size() == 0){
                        continue;
                    }
                    userRecordList(crawlerRequestRecord,domainName,parsedLinks,list,"profile_info.user_id");
                }
            }else{
                String commentId = commentJson.getString("comment_id_str");
                if (StringUtils.isBlank(commentId)) {
                    log.error("id can not is null!url is [{}]",requestUrl);
                    continue;
                }

                // NOTE(review): this passes the comment-list cursor as the reply-list
                // start cursor — presumably the reply cursor should start at "0"; confirm.
                CrawlerRequestRecord commentReplyRecord = getCommentReplyRecord(crawlerRequestRecord, domainName, commentId, cursor, group_id);
                commentReplyRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                parsedLinks.add(commentReplyRecord);
            }
        }

        // user records for comment authors (fan counts fetched later)
        userRecordList(crawlerRequestRecord,domainName,parsedLinks,all,"profile_info.user_id");
    }

    /**
     * Reply-list JSON handler: pages through replies to a comment and emits
     * user records for the reply authors.
     */
    private void commentReplyUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String domainName = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(DOMAIN_NAME);
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        JSONObject data = new JSONObject();
        try {
            data = JSONObject.parseObject(httpPage.getJson().jsonPath($_type + ".data").get());
        } catch (Exception e) {
            // fixed: the message concatenated the url, so the {} placeholder was never filled
            log.error("comment reply page download is error!page [{}] will retry", requestUrl, e);
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        // reply pagination
        Map<String, String> mapFormUrl = getMapFormUrl(requestUrl);
        String commentId = mapFormUrl.get("comment_id");
        String groupId = mapFormUrl.get("group_id");
        // Boolean.TRUE.equals guards against a missing has_more field (auto-unboxing NPE)
        if (Boolean.TRUE.equals(data.getBoolean("has_more"))) {
            String cursor = data.getString("cursor");
            CrawlerRequestRecord commentReplyRecord = getCommentReplyRecord(crawlerRequestRecord, domainName, commentId, cursor, groupId);
            parsedLinks.add(commentReplyRecord);
        }

        // user records for reply authors (fan counts fetched later)
        List<String> all = httpPage.getJson().jsonPath($_type + ".data.reply_list").all();
        userRecordList(crawlerRequestRecord, domainName, parsedLinks, all, "profile_info.user_id");
    }

    /**
     * After internal user-page downloads, scrapes each user's fan count and
     * stashes the id-to-fans map into the request extras for the wash phase.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        Map<String, String> userFansMap = new HashMap<>();
        for (CrawlerRequestRecord record : internalDownloadRecords) {
            String url = record.getInternalDownloadPage().getRequest().getUrl();
            if (!url.matches(USER_URL)) {
                continue;
            }
            String userId = url.substring(url.lastIndexOf("/") + 1);
            String fans = "0";
            try {
                Html html = record.getInternalDownloadPage().getHtml();
                String scraped = html.xpath("//span[text()='粉丝']/preceding-sibling::span[1]/text()").get();
                if (!StringUtils.isBlank(scraped)) {
                    fans = scraped;
                }
            } catch (Exception e) {
                log.error("user fans [{}] get error!", url);
            }
            userFansMap.put(userId, fans);
        }

        // merge into existing extras rather than replacing them
        Map<String, Object> extras = new HashMap<>();
        if (crawlerRecord.getHttpRequest().getExtras() != null) {
            extras.putAll(crawlerRecord.getHttpRequest().getExtras());
        }
        extras.put("userFansMap", userFansMap);
        crawlerRecord.getHttpRequest().setExtras(extras);
    }

    /**
     * Dispatches the downloaded page to the wash routine matching its URL and
     * the result data types tagged on the record.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> results = new ArrayList<>();
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();
        String url = page.getRequest().getUrl();

        // praise list pages wash many items at once; item pages wash a single one
        boolean isListPage = url.matches(PRAISE_JSON_SOURCE_URL2);
        if (resultTags.hasDataType(article)) {
            if (isListPage) {
                results.addAll(washJsonUrlArticleList(crawlerRecord, page));
            } else {
                results.add(washArticle(crawlerRecord, page));
            }
        }
        if (resultTags.hasDataType(interaction)) {
            if (isListPage) {
                results.addAll(washJsonUrlInteractionList(crawlerRecord, page));
            } else {
                results.add(washInteraction(crawlerRecord, page));
            }
        }
        if (resultTags.hasDataType(comment)) {
            if (url.matches(COMMENT_URL)) {
                results.addAll(washComment(crawlerRecord, page));
            } else if (url.matches(COMMENT_REPLY_URL)) {
                results.addAll(washCommentReply(crawlerRecord, page));
            }
        }

        return results;
    }

    /**
     * Washes the praise list JSON into article CrawlerData (kafka pipeline)
     * plus a companion redis record carrying the item url / release time that
     * {@code prepareRequest} consumes on re-entry.
     */
    private List<CrawlerData> washJsonUrlArticleList(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String domainName = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(DOMAIN_NAME);
        List<String> all = httpPage.getJson().jsonPath($_type + ".data.review_list").all();
        // user id -> fan count, collected by afterInternalDownload
        Map<String, String> userFansMap = (Map<String, String>) crawlerRequestRecord.getHttpRequest().getExtras().get("userFansMap");
        for (String data : all) {
            JSONObject info = JSONObject.parseObject(data);
            String articleKey = info.getString("gid_str");// article id
            JSONObject userInfo = info.getJSONObject("user_info");

            String author = userInfo.getString("name");
            String authorId = userInfo.getString("user_id");
            String follows = userFansMap.get(authorId);
            follows = StringUtils.isBlank(follows) ? "0" : follows;
            String releaseTime = info.getString("create_time");
            List<JSONObject> imageUrls = new ArrayList<>();
            if (info.getJSONArray("image_urls") != null) {
                imageUrls = info.getJSONArray("image_urls").toJavaList(JSONObject.class);
            }
            // StringBuilder: no synchronization needed for a loop-local buffer
            StringBuilder imageBuf = new StringBuilder();
            for (JSONObject imageUrl : imageUrls) {
                imageBuf.append(imageUrl.getString("url")).append("\\x01");
            }
            String content = info.getString("content");
            // title is "<year><series><car>"; NOTE(review): any absent field renders
            // as the literal "null" — confirm these fields are always present
            String year = info.getString("year");
            String seriesName = info.getString("series_name");
            String carName = info.getString("car_name");
            String title = year + seriesName + carName;

            JSONObject buyCarInfo = info.getJSONObject("buy_car_info");
            String address = null;
            String price = null;
            String fuelEconomy = null;
            if (buyCarInfo != null) {
                address = buyCarInfo.getString("location");// purchase location
                // NOTE(review): assumes price is present whenever buy_car_info exists — confirm
                price = buyCarInfo.getString("price");
                price = String.valueOf(Double.parseDouble(price.split("万")[0]) * 10000);// bare car price in yuan
                fuelEconomy = buyCarInfo.getString("consumption");// consumption per 100km
                fuelEconomy = fuelEconomy + "/100km";
            }
            // constant-first equals: a missing fuel_form now falls into the "否"
            // branch instead of throwing NPE
            String isElite = info.getString("fuel_form");
            String eliteLevel;
            if ("1".equals(isElite)) {
                isElite = "是";
                eliteLevel = "精华";
            } else {
                isElite = "否";
                eliteLevel = "问答";
            }

            String itemUrl = String.format(PRAISE_ITEM_SOURCE_URL, domainName, articleKey);

            // create_time is epoch seconds; append "000" for milliseconds
            long releaseTimeToLong = Long.parseLong(releaseTime + "000");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_Title, "评价车型：" + title)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Author_Follows, follows)
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Praise_Address_Buy, address)
                    .addContentKV(Field_Praise_Price_Buy, price)
                    .addContentKV(Field_Praise_Fuel_Economy, fuelEconomy)
                    .addContentKV(Field_Images, imageBuf.toString())
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .flowInPipelineTag(KAFKA)
                    .build();
            Map<String, String> carModel = new HashMap<>();
            carModel.put("model_name", title);
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Car_Model, carModel);
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Is_Elite, isElite);
            crawlerData.tagsCreator().bizTags().addCustomKV("eliteLevel", eliteLevel);// 1: elite
            crawlerData.setFilterPipelineResult(true);
            crawlerDataList.add(crawlerData);

            // companion redis record consumed by prepareRequest on re-entry
            CrawlerData itemUrlData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, articleKey))
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV("itemUrl", itemUrl)
                    .addContentKV("releaseTimeToLong", String.valueOf(releaseTimeToLong))
                    .flowInPipelineTag(REDIS)
                    .build();
            itemUrlData.setFilterPipelineResult(true);
            crawlerDataList.add(itemUrlData);
        }
        return crawlerDataList;
    }

    /**
     * Washes the praise list JSON into interaction (comment/like counts) data
     * linked to the corresponding article record via parentId.
     */
    private List<CrawlerData> washJsonUrlInteractionList(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> results = new ArrayList<>();
        String domainName = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(DOMAIN_NAME);
        List<String> reviews = httpPage.getJson().jsonPath($_type + ".data.review_list").all();
        for (String item : reviews) {
            JSONObject info = JSONObject.parseObject(item);
            String articleKey = info.getString("gid_str");
            long releaseTimeToLong = Long.parseLong(info.getString("create_time") + "000");
            String itemUrl = String.format(PRAISE_ITEM_SOURCE_URL, domainName, articleKey);

            CrawlerData interactionData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, interaction.enumVal(), articleKey))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, article.enumVal(), articleKey))
                    .resultLabelTag(interaction)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_I_Comments, info.getString("comment_count_en"))
                    .addContentKV(Field_I_Likes, info.getString("digg_count_en"))
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .flowInPipelineTag(KAFKA)
                    .build();
            interactionData.setFilterPipelineResult(true);
            results.add(interactionData);
        }
        return results;
    }

    /**
     * Washes an article (口碑 review) detail page into a single article CrawlerData.
     * The payload is a JSON blob embedded in the page's
     * {@code <script crossorigin="anonymous">} tag at ".props.pageProps.article".
     *
     * Fixes: null-safe handling of "have_essence" (the previous direct unboxing of
     * {@code info.getBoolean(...)} threw NPE when the key was absent), and
     * StringBuilder instead of StringBuffer (no shared state, no need for sync).
     */
    private CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // The article key is the last path segment of the detail URL.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);
        @SuppressWarnings("unchecked")
        Map<String, String> userFansMap = (Map<String, String>) crawlerRequestRecord.getHttpRequest().getExtras().get("userFansMap");
        String domainName = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(DOMAIN_NAME);

        String result = httpPage.getHtml().xpath("//script[@crossorigin=\"anonymous\"]").get();
        result = new Json(result).jsonPath($_type + ".props.pageProps.article").get();
        JSONObject info = JSONObject.parseObject(result);
        JSONObject user = info.getJSONObject("motor_profile_info");
        String author = user.getString("name");
        String authorId = user.getString("user_id");
        // Fan counts were collected by separate user requests; default to "0" when absent.
        String follows = userFansMap.get(authorId);
        follows = StringUtils.isBlank(follows) ? "0" : follows;
        String releaseTime = info.getString("created_time");
        String content = info.getString("motor_title");
        List<JSONObject> imageUrls = new ArrayList<>();
        if (info.getJSONArray("image_urls") != null) {
            imageUrls = info.getJSONArray("image_urls").toJavaList(JSONObject.class);
        }
        // Join image URLs with the literal "\x01" separator expected downstream.
        StringBuilder imageBuilder = new StringBuilder();
        for (JSONObject imageUrl : imageUrls) {
            imageBuilder.append(imageUrl.getString("url")).append("\\x01");
        }
        // 车系购买信息 — series purchase information.
        JSONObject buyCarInfo = info.getJSONObject("buy_car_info");
        String carName = buyCarInfo.getString("car_name");
        String price = buyCarInfo.getString("price");
        // Price is quoted in 万 (10,000 CNY); convert to plain CNY.
        price = StringUtils.isBlank(price) ? "0" : String.valueOf(Double.parseDouble(price) * 10000);
        String seriesName = buyCarInfo.getString("series_name");
        String address = buyCarInfo.getString("location");
        String consumption = buyCarInfo.getString("consumption");
        consumption = StringUtils.isBlank(consumption) ? "" : consumption + "L/100km";
        String boughtTime = buyCarInfo.getString("bought_time");// purchase time
        String seriesId = info.getJSONObject("motor_car_info").getString("series_id");
        // Null-safe: Boolean.TRUE.equals(null) is false, so a missing key maps to "否".
        String isElite = Boolean.TRUE.equals(info.getBoolean("have_essence")) ? "是" : "否";// essence flag

        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .resultLabelTag(article)
                .url(itemUrl)
                // seconds -> milliseconds
                .releaseTime(Long.parseLong(releaseTime + "000"))
                .addContentKV(Field_Title, "评价车型：" + carName)
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Author_Id, authorId)
                .addContentKV(Field_Author_Follows, follows)
                .addContentKV(Field_Content, content)
                .addContentKV(Field_Praise_Address_Buy, address)
                .addContentKV(Field_Praise_Price_Buy, price)
                .addContentKV(Field_Praise_Fuel_Economy, consumption)
                .addContentKV(Field_Images, imageBuilder.toString())
                .addContentKV(Field_Praise_Time_Buy, boughtTime + "000")
                .flowInPipelineTag(KAFKA)
                .build();
        Map<String, String> carModel = new HashMap<>();
        carModel.put("model_name", carName);
        crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Car_Model, carModel);
        crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Is_Elite, isElite);
        List<Map<String, String>> seriesList = new ArrayList<>();
        Map<String, String> series = new HashMap<>();
        series.put("series_id", seriesId);
        series.put("series_name", seriesName);
        series.put("series_url", String.format(SERIES_SOURCE_URL, domainName, seriesId));
        seriesList.add(series);
        crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Series, seriesList);
        // NOTE(review): brand tag intentionally not set — brand info not found on this page.

        crawlerData.setFilterPipelineResult(true);
        return crawlerData;
    }

    /**
     * Washes the interaction metrics (comments/views/likes/shares) embedded in an
     * article detail page and emits one interaction CrawlerData linked to the
     * article by parentId.
     *
     * Fix: {@code Long.parseLong} replaces the deprecated {@code new Long(String)}
     * boxing constructor.
     */
    private CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // The article key is the last path segment of the detail URL.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        // Page data lives in a JSON blob inside the <script crossorigin="anonymous"> tag.
        String result = httpPage.getHtml().xpath("//script[@crossorigin=\"anonymous\"]").get();
        result = new Json(result).jsonPath($_type + ".props.pageProps.article").get();
        JSONObject info = JSONObject.parseObject(result);
        String comments = info.getString("comment_count");
        String views = info.getString("read_count");
        String shareCount = info.getString("share_count");
        String likes = info.getString("digg_count");
        String releaseTime = info.getString("created_time");

        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, interaction.enumVal(), articleKey))
                .resultLabelTag(interaction)
                .url(itemUrl)
                // seconds -> milliseconds
                .releaseTime(Long.parseLong(releaseTime + "000"))
                .addContentKV(Field_I_Comments, comments)
                .addContentKV(Field_I_Views, views)
                .addContentKV(Field_I_Likes, likes)
                .addContentKV(Field_I_Forwards, shareCount)
                .flowInPipelineTag(KAFKA)
                .build();
        crawlerData.setFilterPipelineResult(true);
        return crawlerData;
    }

    /**
     * Washes a comment-list JSON page: every element of ".data.comment_data" is fed
     * through the shared comment washer, along with any inline replies nested under
     * its "reply_data.reply_list" node.
     *
     * Fix: guards against a missing "reply_data" node, which previously threw a raw
     * NullPointerException and aborted the whole wash.
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        @SuppressWarnings("unchecked")
        Map<String, String> userFansMap = (Map<String, String>) crawlerRequestRecord.getHttpRequest().getExtras().get("userFansMap");

        List<String> allReplies = httpPage.getJson().jsonPath($_type + ".data.comment_data").all();
        for (String data : allReplies) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            getCommentData(crawlerRequestRecord, httpPage, crawlerDataList, jsonObject, userFansMap);

            // Wash the inline replies of this comment, when present.
            JSONObject replyData = jsonObject.getJSONObject("reply_data");
            if (replyData != null && replyData.getJSONArray("reply_list") != null) {
                List<JSONObject> replyList = replyData.getJSONArray("reply_list").toJavaList(JSONObject.class);
                for (JSONObject reply : replyList) {
                    getCommentData(crawlerRequestRecord, httpPage, crawlerDataList, reply, userFansMap);
                }
            }
        }
        return crawlerDataList;
    }

    /**
     * Washes one comment (or reply) JSON node into two CrawlerData records that are
     * appended to {@code crawlerDataList}: the comment content itself, plus its
     * interaction metrics (likes / reply count).
     *
     * Fix: StringBuilder instead of StringBuffer (local-only, no synchronization needed).
     */
    private void getCommentData(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerData> crawlerDataList, JSONObject object, Map<String, String> userFansMap) {
        String requestUrl = httpPage.getRequest().getUrl();
        String articleKey = object.getString("group_id");
        // Replies carry "reply_id_str"; top-level comments carry "comment_id_str".
        String commentId = object.getString("reply_id_str");
        commentId = StringUtils.isBlank(commentId) ? object.getString("comment_id_str") : commentId;
        JSONObject user = object.getJSONObject("profile_info");
        String author = user.getString("name");
        String authorId = user.getString("user_id");
        // Fan counts were collected by separate user requests; default to "0" when absent.
        String follows = userFansMap.get(authorId);
        follows = StringUtils.isBlank(follows) ? "0" : follows;
        String releaseTime = object.getString("create_time");
        String content = object.getString("text");
        List<JSONObject> imageUrls = new ArrayList<>();
        if (object.getJSONArray("image_urls") != null) {
            imageUrls = object.getJSONArray("image_urls").toJavaList(JSONObject.class);
        }
        // Join image URLs with the literal "\x01" separator expected downstream.
        StringBuilder imageBuilder = new StringBuilder();
        for (JSONObject imageUrl : imageUrls) {
            imageBuilder.append(imageUrl.getString("url")).append("\\x01");
        }
        String likes = object.getString("digg_count");
        String comments = object.getString("reply_count");
        comments = StringUtils.isBlank(comments) ? "0" : comments;
        // seconds -> milliseconds
        long releaseTimeToLong = Long.parseLong(releaseTime + "000");
        String replyToReplyId = object.getString("reply_to_reply_id_str");
        String replyToCommentId = object.getString("reply_to_comment_id_str");

        CrawlerData commentData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, comment.enumVal(), commentId))
                .resultLabelTag(comment)
                .releaseTime(releaseTimeToLong)
                .url(requestUrl)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Author_Id, authorId)
                .addContentKV(Field_Author_Follows, washUserFans(follows))
                .addContentKV(Field_Content, content)
                .addContentKV(Field_Images, imageBuilder.toString())
                .addContentKV("reply_to_reply_id", replyToReplyId)
                .addContentKV("reply_to_comment_id", replyToCommentId)
                .flowInPipelineTag(KAFKA)
                .build();
        commentData.setFilterPipelineResult(true);
        crawlerDataList.add(commentData);

        // NOTE(review): this dataId joins the raw `interaction` enum (its toString)
        // rather than interaction.enumVal() as used everywhere else — confirm the
        // intended id format; changing it now would alter already-persisted keys,
        // so it is deliberately left as-is.
        CrawlerData interactionData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, comment.enumVal(), commentId))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), SITE, comment.enumVal(), interaction, commentId))
                .resultLabelTag(interaction)
                .releaseTime(releaseTimeToLong)
                .url(requestUrl)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                .addContentKV(Field_I_Likes, likes)
                .addContentKV(Field_I_Comments, comments)
                .flowInPipelineTag(KAFKA)
                .build();
        interactionData.setFilterPipelineResult(true);
        crawlerDataList.add(interactionData);
    }

    /**
     * Washes a comment-reply JSON page: every element of ".data.reply_list" is fed
     * through the shared comment washer.
     */
    private List<CrawlerData> washCommentReply(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> results = new ArrayList<>();
        Map<String, String> userFansMap = (Map<String, String>) crawlerRequestRecord.getHttpRequest().getExtras().get("userFansMap");
        for (String reply : httpPage.getJson().jsonPath($_type + ".data.reply_list").all()) {
            getCommentData(crawlerRequestRecord, httpPage, results, JSONObject.parseObject(reply), userFansMap);
        }
        return results;
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // Intentionally a no-op: this script performs no post-execution cleanup.
    }

    /**
     * Re-enqueues a failed record for another download attempt, up to 5 retries.
     * The retry count is tracked in the biz tag "dcdapp_request_again"; the record
     * is rebuilt as a turn-page or item-page request depending on whether the
     * original carries the "turn_page_item_request" request label.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {// REQUEST_AGAIN_TAG = "dcdapp_request_again"
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 5) {// give up after 5 retries
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // The "turn_page_item_request" label decides which builder path rebuilds the record.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord = null;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)// vary key per retry to dodge dedup
                    .notFilterRecord()// remove the filter condition
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // NOTE(review): both branches assign via build(), so this guard only matters
        // if build() can return null — confirm and drop if it cannot.
        if (crawlerRequestRecord == null) {
            return;
        }

        // Carry over the request context and wash/parse flags, then bump the retry counter.
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Reads the keyword list from an internally-downloaded support page and stores
     * it on the request record as the KEYWORDS biz tag. Logs and returns early when
     * the support page did not download successfully.
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord) {
        HttpPage supportPage = supportSourceRecord.getInternalDownloadPage();
        Json json = supportPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", supportPage.getRequest().getUrl());
            return;
        }

        List<String> keywords = new ArrayList<>();
        for (String entry : json.jsonPath($_type + ".content").all()) {
            keywords.add(JSONObject.parseObject(entry).getString("keyword"));
        }
        requestRecord.tagsCreator().bizTags().addCustomKV(KEYWORDS, keywords);
    }

    /**
     * Normalizes a fan-count string to a plain integer string.
     *   null/blank     -> "0"
     *   "3.21万"       -> "32100"  ("万" = 10,000; fractional part truncated)
     *   anything else  -> returned unchanged
     *
     * Fix: truncation now uses a long cast instead of splitting Double.toString on
     * '.', which broke for values >= 1000万 — Double.toString emits scientific
     * notation (e.g. "1.0E7"), so the old split returned "1".
     */
    private static String washUserFans(String fans) {
        // Blank check equivalent to StringUtils.isBlank, using only java.lang.
        if (fans == null || fans.chars().allMatch(Character::isWhitespace)) {
            return "0";
        }
        if (!fans.contains("万")) {
            return fans;
        }
        long scaled = (long) (Double.parseDouble(fans.split("万")[0]) * 10000);
        return String.valueOf(scaled);
    }

    /**
     * Returns the first match of {@code regx} in {@code input}, or null when there
     * is no match.
     *
     * Fix: the original used a while-loop that returned on its first iteration;
     * a plain if expresses the single-match intent.
     */
    private static String getString(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        if (matcher.find()) {
            return matcher.group();
        }
        return null;
    }

    /**
     * Builds the next praise (口碑) list-page request for a car series. The current
     * page number is taken from the "mapFormUrl" biz tag (parsed query params of the
     * previous list request) and incremented; the first page is requested when no
     * previous page info exists.
     *
     * Fixes: removed the dead {@code page == null} branch (page was always assigned),
     * and a missing/blank "page" param now logs and falls back to page 1 instead of
     * throwing NumberFormatException.
     */
    private CrawlerRequestRecord getPraiseJsonRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, String domainName, String seriesId) throws UnsupportedEncodingException {
        KVTag mapFormUrl = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("mapFormUrl");
        String page = "1";
        if (mapFormUrl != null) {
            @SuppressWarnings("unchecked")
            Map<String, String> params = (Map<String, String>) mapFormUrl.getVal();
            String currentPage = params == null ? null : params.get("page");
            if (StringUtils.isNotBlank(currentPage)) {
                page = String.valueOf(Integer.parseInt(currentPage) + 1);
            } else if (params != null) {
                log.error("列表链接翻页获取错误");
            }
        }
        String url = String.format(PRAISE_JSON_SOURCE_URL1, seriesId, page);
        // Trailing "%E7%BB%BC%E5%90%88" is the URL-encoded sort keyword "综合" (comprehensive).
        url = url + "%E7%BB%BC%E5%90%88";
        return CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();
    }

    /**
     * Builds the item-page request for one article entry from the praise list JSON.
     * Entries carrying a "tag_list" node are skipped (returns null), as are entries
     * missing a display time or unique id.
     */
    private CrawlerRequestRecord getItemRecord(CrawlerRequestRecord crawlerRequestRecord, String data, String domainName) {
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        JSONObject entry = JSONObject.parseObject(data);

        JSONObject tagList = entry.getJSONObject("tag_list");
        if (tagList != null) {
            log.error("tag_list is [{}]", tagList);
            return null;
        }

        String releaseTime = entry.getJSONObject("info").getString("display_time");
        String uniqueId = entry.getString("unique_id");
        if (StringUtils.isBlank(releaseTime) || StringUtils.isBlank(uniqueId)) {
            log.error("releaseTime or unique_id is null ! data is [{}] , url is [{}]", data, requestUrl);
            return null;
        }

        String itemUrl = String.format(PRAISE_ITEM_SOURCE_URL, domainName, uniqueId);
        // Source timestamp is in seconds; append "000" to convert to milliseconds.
        CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(itemUrl)
                .releaseTime(Long.parseLong(releaseTime + "000"))
                .copyBizTags()
                .copyResultTags()
                .build();

        // Pass the raw entry along so the item washer can reuse it.
        Map<String, Object> extras = new HashMap<>();
        extras.put("info", entry);
        itemRecord.getHttpRequest().setExtras(extras);
        return itemRecord;
    }

    /**
     * Builds the turn-page request for an article's comment list at the given cursor.
     */
    private CrawlerRequestRecord getCommentRecord(CrawlerRequestRecord crawlerRequestRecord, String domainName, String cursor, String groupId) {
        String commentUrl = String.format(COMMENT_SOURCE_URL, domainName, groupId, cursor);
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .resultLabelTag(comment)
                .build();
        // Keep the article key available so the comment washer can build parent ids.
        turnRecord.getHttpRequest().addExtra("articleKey", groupId);
        return turnRecord;
    }

    // Builds the request for one comment's reply list.
    private CrawlerRequestRecord getCommentReplyRecord(CrawlerRequestRecord crawlerRequestRecord,String domainName, String commentId,String cursor,String groupId){
        // NOTE(review): the `cursor` parameter is never used — the third format
        // argument is hard-coded to 0, so only the first reply page is ever
        // requested. Confirm against COMMENT_REPLY_SOURCE_URL and pass `cursor`
        // here if reply paging is intended.
        String url = String.format(COMMENT_REPLY_SOURCE_URL, domainName, commentId,0, groupId);
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .resultLabelTag(comment)
                .build();
        // Propagate the caller's extras (e.g. userFansMap) to the new request.
        record.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        return record;
    }
    /**
     * Collects the distinct author ids referenced by the given JSON items and queues
     * one fan-count (user) request per author into {@code parsedLinks}. The
     * {@code jsonPath} argument selects which node carries the id: "profile_info.user_id"
     * reads "profile_info.user_id_str", anything else reads "user_info.user_id".
     */
    private void userRecordList(CrawlerRequestRecord crawlerRequestRecord, String domainName, List<CrawlerRequestRecord> parsedLinks, List<String> all, String jsonPath) {
        Set<String> authorIds = new HashSet<>();
        for (String item : all) {
            JSONObject node = JSONObject.parseObject(item);
            String userId;
            if (jsonPath.equals("profile_info.user_id")) {
                userId = node.getJSONObject("profile_info").getString("user_id_str");
            } else {
                userId = node.getJSONObject("user_info").getString("user_id");
            }
            if (StringUtils.isNotBlank(userId)) {
                authorIds.add(userId);
            }
        }
        for (String authorId : authorIds) {
            parsedLinks.add(getUserRecord(crawlerRequestRecord, domainName, authorId));
        }
    }

    /**
     * Builds an internal-download request for an author's profile page, used to
     * fetch the author's fan count.
     */
    private CrawlerRequestRecord getUserRecord(CrawlerRequestRecord crawlerRequestRecord, String domainName, String userId) {
        String userUrl = String.format(USER_SOURCE_URL, domainName, userId);
        // Internal download only: the page is neither washed nor parsed downstream.
        return CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(userUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .needWashed(false)
                .needParsed(false)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
    }

    /**
     * Parses the query string of {@code requestUrl} into a name -> value map.
     *
     * Fix: returns an empty map when the URL carries no query part; the original
     * indexed {@code split[1]} unconditionally and threw
     * ArrayIndexOutOfBoundsException on URLs without '?'.
     */
    private Map<String, String> getMapFormUrl(String requestUrl) {
        Map<String, String> map = new HashMap<>();
        String[] parts = requestUrl.split("\\?");
        if (parts.length < 2) {
            return map;
        }
        for (NameValuePair pair : URLEncodedUtils.parse(parts[1], Charset.defaultCharset())) {
            map.put(pair.getName(), pair.getValue());
        }
        return map;
    }

    /**
     * Checks whether the given release time falls inside the record's configured
     * date-range filter.
     *
     * @param crawlerRequestRecord record carrying the filter configuration
     * @param releaseTimeToLong    release time in epoch milliseconds
     * @return true when no date filter applies, or when the time lies within
     *         [startTime, endTime]; false otherwise (including releaseTime == 0)
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord, Long releaseTimeToLong) {
        boolean isRange = false;
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        if (filter == CrawlerEnum.CrawlerRecordFilter.keyOrDateRange || filter == CrawlerEnum.CrawlerRecordFilter.dateRange) {
            List<FilterInfo> filterInfos = crawlerRequestRecord.getFilterInfos();
            Long startTime = null;
            Long endTime = null;
            for (FilterInfo filterInfo : filterInfos) {
                if (filterInfo.getFilter() == CrawlerEnum.CrawlerRecordFilter.dateRange) {
                    long[] dateAllowRange = filterInfo.getDateAllowRange();
                    int hourFromNow = filterInfo.getHourFromNow();
                    if (dateAllowRange != null) {
                        // Explicit [start, end] window takes precedence.
                        startTime = dateAllowRange[0];
                        endTime = dateAllowRange[1];
                    } else if (hourFromNow != 0) {
                        // Relative window: the last `hourFromNow` hours up to now.
                        endTime = System.currentTimeMillis();
                        startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
                    }
                }
            }
            // endTime is non-null whenever startTime is: both are set together above.
            if (startTime != null && releaseTimeToLong != 0 && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime) {
                isRange = true;
            }
        } else {
            // No date filter configured: everything is in range.
            isRange = true;
        }
        return isRange;
    }

    public static void main(String[] args) {
        // Quick manual check of the fan-count normalizer.
        String sample = "3.21万";
        System.out.println(washUserFans(sample));
    }
}
