package com.chance.cc.crawler.development.scripts.eastMoney;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpConstant;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * East Money (东方财富网) keyword crawler: seeds search requests from a keyword
 * list, pages through the site search API, then fans out to article detail,
 * interaction (counter), comment and comment-reply pages.
 *
 * @Author Zhao.Hhuan
 * @Date Create in 2021/2/4 19:07
 * @Description 东方财富网 关键词采集
 **/
public class EastMoneyKeywordCrawlerScript extends CrawlerCommonScript {

    private Logger log = LoggerFactory.getLogger(EastMoneyKeywordCrawlerScript.class);
    // Script domain key; also prefixes data ids and the retry counter tag.
    private static final String DOMAIN = "eastmoney";
    // Business "site" tag value this script accepts (see crawlerCheck).
    private static final String SITE = "searchKw";
    // Business-tag key counting download retries (capped at 5 in requestAgainCrawlerRecord).
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_keyword_request_again";
    // Extra-parameter key holding the search page URL the keywords are applied to.
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";

    // Entry URL of the site search.
    private static final String ENTRANCE_URL = "http://so.eastmoney.com/web/s";
    // Regex matched against the configured search source URL.
    // NOTE(review): "[a-zA-z]" also matches the ASCII chars between 'Z' and 'a'
    // ([ \ ] ^ _ `) — probably intended [a-zA-Z]; confirm before tightening.
    private static final String SOURCE_URL = "http://so.eastmoney.com/[a-zA-z]*/s\\?keyword=\\S*";
    // Regex for the search-API JSON endpoint (paginated result list).
    private static final String JSON_URL = "http://searchapi.eastmoney.com/bussiness/Web/Get[CMS]*SearchList\\S*";

    // Regex for article detail pages: .../news/<code>,<postid>.html
    private static final String ITEM_URL = "http[s]*://\\S*.eastmoney.com/news/\\d+,\\d+.html";
    // Template (%s = post id) and matching regex for the interaction-counter API.
    private static final String INTERACTION_SOURCE_URL = "https://gbapi.eastmoney.com/abstract/api/PostShort/NewsArticleBriefInfo?type=1&version=80008000&product=guba&plat=web&postid=%s";
    private static final String INTERACTION_URL = "https://gbapi.eastmoney.com/abstract/api/PostShort/NewsArticleBriefInfo\\S*";
    // Template (%s = post id) and matching regex for the JSONP comment-list API.
    private static final String COMMENT_SOURCE_URL = "http://gbapi.eastmoney.com/reply/JSONP/ArticleNewReplyList?callback=jQuery11230163358391088672_1623726163310&plat=web&version=300&product=guba&postid=%s&sort=1&sorttype=1&p=1&ps=20&type=1";
    private static final String COMMENT_URL = "http://gbapi.eastmoney.com/reply/JSONP/ArticleNewReplyList\\S*";
    // Template (%s = post id, %s = reply id) and regex for the comment-reply detail API.
    private static final String COMMENT_REPLY_SOURCE_URL = "http://gbapi.eastmoney.com/reply/JSONP/ArticleReplyDetail?callback=jQuery1123003220043720322785_1623732896856&plat=web&version=300&product=guba&postid=%s&replyid=%s&sort=1&sorttype=1&ps=10&p=1&type=1";
    private static final String COMMENT_REPLY_URL = "http://gbapi.eastmoney.com/reply/JSONP/ArticleReplyDetail\\S*";

    /**
     * Domain identifier of this script.
     *
     * @return the constant domain key ("eastmoney") used to route records here
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers the URL patterns that route downloaded pages into this script.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {ENTRANCE_URL, JSON_URL, ITEM_URL, INTERACTION_URL, COMMENT_URL, COMMENT_REPLY_URL};
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input gate: only records whose business "site" tag equals {@code searchKw}
     * are processed by this script.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return true when the record belongs to this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return SITE.equals(crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site"));
    }

    /**
     * Builds the initial request batch from support-source records: any source
     * whose URL contains "keys" is treated as a keyword list and expanded into
     * search requests via initKeyword.
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> records = new ArrayList<>();
        for (CrawlerRequestRecord sourceRecord : supportSourceRecords) {
            if (sourceRecord.getHttpRequest().getUrl().contains("keys")) {
                initKeyword(requestRecord, sourceRecord, records);
            }
        }
        return records;
    }

    /**
     * Dispatches a downloaded page to the matching link parser. Failed
     * downloads (or HTTP 503) are re-queued through requestAgainCrawlerRecord
     * and skip the wash stage.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded page
     * @return follow-up requests discovered on the page
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> links = new ArrayList<>();

        boolean downloadFailed = !httpPage.isDownloadSuccess() || httpPage.getStatusCode() == 503;
        if (downloadFailed) {
            log.error(DOMAIN + " keyword page download error!will retry");
            requestAgainCrawlerRecord(links, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return links;
        }

        String url = httpPage.getRequest().getUrl();
        if (url.matches(JSON_URL)) {
            jsonUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(ITEM_URL)) {
            itemUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(COMMENT_URL)) {
            commentUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(COMMENT_REPLY_URL)) {
            commentReplyUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        return links;
    }

    /**
     * Handles a search-API JSON result page: retries on API failure, queues the
     * next result page (pageindex + 1), and emits one item request per article
     * found in the ".Data" payload.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded JSON page
     * @param parsedLinks          output list the new requests are appended to
     */
    private void jsonUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String success = httpPage.getJson().jsonPath($_type + ".IsSuccess").get();
        if (!"true".equals(success)) {
            String code = httpPage.getJson().jsonPath($_type + ".Code").get();
            log.error("json url down load is err! code is :" + code + ",will retry!");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        String requestUrl = httpPage.getRequest().getUrl();
        //翻页 — rebuild the URL with pageindex incremented by one
        String[] split = requestUrl.split("\\?");
        if (split.length < 2) {
            // BUG FIX: JSON_URL does not guarantee a query string; split[1]
            // previously threw ArrayIndexOutOfBoundsException on such URLs.
            log.error("json url has no query string, can not turn page: {}", requestUrl);
            return;
        }
        List<NameValuePair> params = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        StringBuilder nextUrl = new StringBuilder(split[0]).append('?');
        for (NameValuePair param : params) {
            if ("pageindex".equals(param.getName())) {
                int currentPage = Integer.parseInt(param.getValue());
                nextUrl.append(param.getName()).append('=').append(currentPage + 1).append('&');
            } else if ("keyword".equals(param.getName())) {
                // Re-encode the keyword: parse() decoded it above.
                try {
                    nextUrl.append(param.getName()).append('=').append(URLEncoder.encode(param.getValue(), "UTF-8")).append('&');
                } catch (UnsupportedEncodingException e) {
                    log.error("encode keyword for next page failed", e);
                }
            } else {
                nextUrl.append(param.getName()).append('=').append(param.getValue()).append('&');
            }
        }
        // Drop the trailing '&' (or the '?' when the query was empty, matching prior behavior).
        nextUrl.setLength(nextUrl.length() - 1);
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl.toString())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                .build();
        parsedLinks.add(turnRecord);

        //详情 — one item request per article entry
        List<String> all = httpPage.getJson().jsonPath($_type + ".Data").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String itemUrl = jsonObject.getString("Art_Url");
            String releaseTime = jsonObject.getString("Art_CreateTime");
            if (StringUtils.isBlank(itemUrl) || StringUtils.isBlank(releaseTime)) {
                continue;
            }

            try {
                long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                // BUG FIX: previously logged only e.getMessage(), losing the stack trace.
                log.error("parse Art_CreateTime [" + releaseTime + "] failed", e);
            }
        }
    }

    /**
     * From an article detail page, schedules the interaction-counter request
     * and the first comment-list request, each guarded by its result-tag data
     * type (the tag is removed once the follow-up request has been queued).
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String articleUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        // Post id embedded in ".../news/<code>,<postid>.html".
        String postId = articleUrl.substring(articleUrl.lastIndexOf(",") + 1, articleUrl.lastIndexOf("."));

        // New-layout interaction counters.
        if (resultTags.hasDataType(interaction)) {
            crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().removeLabelTag("interaction");
            CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(String.format(INTERACTION_SOURCE_URL, postId))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .resultLabelTag(interaction)
                    .build();
            interactionRecord.getHttpRequest().addExtra("articleUrl", articleUrl);
            parsedLinks.add(interactionRecord);
        }

        if (resultTags.hasDataType(comment)) {
            crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().removeLabelTag("comment");
            CrawlerRecord commentFilter = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class);
            if (commentFilter == null) {
                log.error("comment filter can not null!");
                return;
            }
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(String.format(COMMENT_SOURCE_URL, postId))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .needWashed(true)
                    .resultLabelTag(comment)
                    .build();
            // Carry the caller-supplied comment filter onto the new request.
            commentRecord.setFilterInfos(commentFilter.getFilterInfos());
            commentRecord.setFilter(commentFilter.getFilter());
            parsedLinks.add(commentRecord);
        }
    }

    /**
     * From a JSONP comment-list response: queues the next comment page, then
     * one reply-detail request for every comment that has replies.
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        parsedLinks.add(getNextCommentUrlRecord(crawlerRequestRecord, httpPage));

        // 评论回复 — fan out to reply-detail pages
        Map<String, String> queryParams = getMapByUrl(requestUrl);
        String postId = queryParams.get("postid");
        String callback = queryParams.get("callback");
        List<String> comments = httpPage.getJson().removePadding(callback).jsonPath($_type + ".re").all();
        for (String entry : comments) {
            JSONObject commentJson = JSONObject.parseObject(entry);
            if (commentJson.getIntValue("reply_count") <= 0) {
                continue;
            }
            String replyUrl = String.format(COMMENT_REPLY_SOURCE_URL, postId, commentJson.getString("reply_id"));
            CrawlerRequestRecord replyRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(replyUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .needWashed(true)
                    .copyResultTags()
                    .build();
            parsedLinks.add(replyRecord);
        }
    }

    /**
     * A reply-detail response only needs its own next page queued.
     */
    private void commentReplyUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        CrawlerRequestRecord nextPageRecord = getNextCommentUrlRecord(crawlerRequestRecord, httpPage);
        parsedLinks.add(nextPageRecord);
    }


    /**
     * Builds the next-page request for a comment or comment-reply list by
     * advancing the "p" query parameter of the current URL (via getNextUrl).
     */
    private CrawlerRequestRecord getNextCommentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String nextPageUrl = getNextUrl(httpPage.getRequest().getUrl(), null, "p");
        return CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();
    }

    /**
     * Routes a washed page to the extractor(s) selected by its result tags;
     * a page may yield article, interaction and comment data simultaneously.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> results = new ArrayList<>();
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();
        if (resultTags.hasDataType(article)) {
            results.add(washArticle(crawlerRecord, page));
        }
        if (resultTags.hasDataType(interaction)) {
            results.add(washInteraction(crawlerRecord, page));
        }
        if (resultTags.hasDataType(comment)) {
            results.addAll(washComment(crawlerRecord, page));
        }
        return results;
    }

    /**
     * Washes an article detail page (old and new layouts) into a CrawlerData.
     * Title/author/source are read from layout-specific XPaths; the body is the
     * HTML-unescaped concatenation of the ContentBody paragraphs plus the
     * "b-review" blurb. Returns null when the release time cannot be parsed.
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // Article key embedded in ".../news/<code>,<key>.html".
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf(",") + 1, itemUrl.lastIndexOf("."));

        String title = httpPage.getHtml().xpath("//div[@class=\"newsContent\"]/h1/text()|//div[@class=\"title\"]").get();
        String author = httpPage.getHtml().xpath("//div[@class=\"author\"]/text()").get();
        if (StringUtils.isNotBlank(author)) {
            author = author.substring(author.lastIndexOf("：") + 1);
        } else {
            author = "";
        }
        String source = httpPage.getHtml().xpath("//div[@class=\"source data-source\"]/@data-source").get();
        // New layout: source/author live in "infos" items labelled 来源：/作者：.
        if (StringUtils.isBlank(source)) {
            List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"infos\"]/div[@class=\"item\"]").nodes();
            for (Selectable node : nodes) {
                String text = node.xpath("./text()").get();
                if (text.contains("来源：")) {
                    source = node.xpath("./a").get();
                    source = StringUtils.isBlank(source) ? text.split("：")[1] : source;
                    break;
                }
                if (text.contains("作者：")) {
                    author = node.xpath("./a").get();
                    author = StringUtils.isBlank(author) ? text.split("：")[1] : author;
                }
            }
        }

        // Strip carriage-return entities leaking from the page markup.
        source = StringUtils.isNotBlank(source) && source.contains("&#xD;\n") ? source.replace("&#xD;\n", "") : source;
        author = StringUtils.isNotBlank(author) && author.contains("&#xD;\n") ? author.replace("&#xD;\n", "") : author;
        String releaseTime = httpPage.getHtml().xpath("//div[@class=\"time\"]/text()|//div[@class=\"infos\"]/div[@class=\"item\"]").get();
        String review = StringUtils.isNotEmpty(httpPage.getHtml().xpath("//div[@class=\"b-review\"]/text()").get()) ? httpPage.getHtml().xpath("//div[@class=\"b-review\"]/text()").get() : "";
        List<String> articleTextList = httpPage.getHtml().xpath("//div[@id=\"ContentBody\"]//p//text()").all();
        // BUG FIX: the paragraphs were previously appended twice — once raw and
        // once HTML-unescaped. Append each paragraph exactly once, unescaped.
        StringBuilder contents = new StringBuilder(review);
        for (String articleText : articleTextList) {
            contents.append(StringEscapeUtils.unescapeHtml(articleText)).append(" ");
        }

        CrawlerData crawlerData = null;
        try {
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .resultLabelTag(article)
                    .url(itemUrl)
                    // NOTE(review): only the old "yyyy年MM月dd日 HH:mm" format is
                    // parsed; the "infos" layout may emit another format — confirm.
                    .releaseTime(DateUtils.parseDate(releaseTime, "yyyy年MM月dd日 HH:mm").getTime())
                    .addContentKV(Field_Content, getContentNoEs(contents.toString()))
                    .addContentKV(Field_Title, getContentNoEs(title))
                    .addContentKV(Field_Source, StringUtils.isNotBlank(source) ? source.trim() : "")
                    .addContentKV(Field_Author, StringUtils.isNotBlank(author) ? author.trim() : "")
                    .build();
        } catch (ParseException e) {
            // BUG FIX: previously logged only e.getMessage(), losing the stack trace.
            log.error("parse release time [" + releaseTime + "] failed, url: " + itemUrl, e);
        }

        return crawlerData;
    }

    /**
     * Washes the interaction-counter JSON (views / comments / forwards / likes)
     * for an article. Returns null when the response has no ".re" payload.
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        Map<String, String> mapByUrl = getMapByUrl(itemUrl);
        String articleKey = mapByUrl.get("postid");

        List<String> all = httpPage.getJson().jsonPath($_type + ".re").all();
        // BUG FIX: all.get(0) threw IndexOutOfBoundsException on an empty payload.
        if (all == null || all.isEmpty()) {
            log.error("interaction response has no 're' data, url: {}", itemUrl);
            return null;
        }
        JSONObject jsonObject = JSONObject.parseObject(all.get(0));
        String views = jsonObject.getString("post_click_count");
        String comments = jsonObject.getString("post_comment_count");
        String forward = jsonObject.getString("post_forward_count");
        String likes = jsonObject.getString("post_like_count");

        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                .resultLabelTag(interaction)
                .url(itemUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_I_Views, views)
                .addContentKV(Field_I_Comments, comments)
                .addContentKV(Field_I_Forwards, forward)
                .addContentKV(Field_I_Likes, likes)
                .build();

        return crawlerData;
    }

    /**
     * Washes comment data; first-level comment pages and reply-detail pages
     * are distinguished by their request URL pattern.
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String requestUrl = httpPage.getRequest().getUrl();
        return requestUrl.matches(COMMENT_URL)
                ? new ArrayList<>(getCommentData(crawlerRequestRecord, httpPage))
                : new ArrayList<>(getCommentReplyData(crawlerRequestRecord, httpPage));
    }

    /**
     * Extracts first-level comments from a JSONP comment-list response. Each
     * comment yields two records: the comment itself and a nested interaction
     * record carrying its like/reply counters.
     */
    private List<CrawlerData> getCommentData(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        Map<String, String> mapByUrl = getMapByUrl(requestUrl);
        String articleKey = mapByUrl.get("postid");
        String callback = mapByUrl.get("callback");

        List<String> commentList = httpPage.getJson().removePadding(callback).jsonPath($_type + ".re").all();
        for (String data : commentList) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            JSONObject user = jsonObject.getJSONObject("reply_user");
            String author = user.getString("user_nickname");
            String authorId = user.getString("user_id");
            String releaseTime = jsonObject.getString("reply_publish_time");
            String content = jsonObject.getString("reply_text");
            String commentId = jsonObject.getString("reply_id");
            String comments = jsonObject.getString("reply_count");
            String likes = jsonObject.getString("reply_like_count");

            try {
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                        .resultLabelTag(comment)
                        .url(requestUrl)
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Author_Id, authorId)
                        .addContentKV(Field_Content, content)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .build();
                crawlerDataList.add(crawlerData);

                // Interaction record nested under the comment (likes + reply count).
                CrawlerData commentInteraction = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), interaction.enumVal(), commentId))
                        .resultLabelTag(interaction)
                        .url(requestUrl)
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                        .addContentKV(Field_I_Likes, likes)
                        .addContentKV(Field_I_Comments, comments)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .build();
                crawlerDataList.add(commentInteraction);
            } catch (Exception e) {
                // BUG FIX: previously logged only e.getMessage(), losing the stack trace.
                log.error("wash comment failed, url: " + requestUrl, e);
            }
        }
        return crawlerDataList;
    }

    /**
     * Extracts second-level replies (".re.child_replys") from a JSONP
     * reply-detail response. Each reply yields a comment record plus a nested
     * interaction record carrying its like counter.
     */
    private List<CrawlerData> getCommentReplyData(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        Map<String, String> mapByUrl = getMapByUrl(requestUrl);
        String articleKey = mapByUrl.get("postid");
        String callback = mapByUrl.get("callback");

        List<String> commentList = httpPage.getJson().removePadding(callback).jsonPath($_type + ".re.child_replys").all();
        for (String data : commentList) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            JSONObject user = jsonObject.getJSONObject("reply_user");
            String author = user.getString("user_nickname");
            String authorId = user.getString("user_id");
            String releaseTime = jsonObject.getString("reply_publish_time");
            String content = jsonObject.getString("reply_text");
            String commentId = jsonObject.getString("reply_id");
            String likes = jsonObject.getString("reply_like_count");

            try {
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                        .resultLabelTag(comment)
                        .url(requestUrl)
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Author_Id, authorId)
                        .addContentKV(Field_Content, content)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .build();
                crawlerDataList.add(crawlerData);

                // Interaction record nested under the reply (likes only at this level).
                CrawlerData commentInteraction = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), interaction.enumVal(), commentId))
                        .resultLabelTag(interaction)
                        .url(requestUrl)
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                        .addContentKV(Field_I_Likes, likes)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .build();
                crawlerDataList.add(commentInteraction);
            } catch (Exception e) {
                // BUG FIX: previously logged only e.getMessage(), losing the stack trace.
                log.error("wash comment reply failed, url: " + requestUrl, e);
            }
        }
        return crawlerDataList;
    }

    /**
     * No post-execution work is required for this script; intentionally a no-op.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-queues a failed download, tracking attempts in the REQUEST_AGAIN_TAG
     * business tag and giving up after 5 retries. The retry is rebuilt as a
     * turn-page or item-page record depending on the original request's label,
     * and POST method/body, extras and headers are carried over.
     *
     * @param crawlerRequestRecords output list the retry record is appended to
     * @param crawlerRecord         the request whose download failed
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 5) {
                log.error("eastMoney  download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        CrawlerRequestRecord crawlerRequestRecord = null;
        // Presence of the "turn_page_item_request" label decides the builder flavor.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpRequest(crawlerRecord.getHttpRequest())
                    // recordKey is suffixed with the retry count to keep retries distinct.
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .releaseTime(System.currentTimeMillis())
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpRequest(crawlerRecord.getHttpRequest())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .releaseTime(System.currentTimeMillis())
                    .build();
        }

        if (crawlerRequestRecord == null) {
            return;
        }
        // Preserve POST method and body on the retried request.
        String method = crawlerRecord.getHttpRequest().getMethod();
        if (StringUtils.isNotBlank(method) && HttpConstant.Method.POST.equals(method)) {
            crawlerRequestRecord.getHttpRequest().setMethod(HttpConstant.Method.POST);
            crawlerRequestRecord.getHttpRequest().setRequestBody(crawlerRecord.getHttpRequest().getRequestBody());
        }

        // NOTE(review): setNeedWashPage is called twice and the first value is
        // immediately overwritten — the first call was probably meant to be
        // setNeedParsedPage(crawlerRecord.isNeedParsedPage()). Confirm intent.
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Seeds search-API requests from an internally downloaded keyword list.
     * Requires the "searchKwSourceUrl" extra on the driving request; the module
     * segment of that URL must map to a JSON endpoint (see getJsonSourceUrl).
     *
     * @param requestRecord       the driving keyword-collection request
     * @param supportSourceRecord support record whose internal page holds the keywords
     * @param crawlerRecords      output list the search requests are appended to
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        String searchKwSourceUrl = (String) requestRecord.getHttpRequest().getExtras().get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchKwSourceUrl)) {
            // BUG FIX: message previously read "can not is null".
            log.error("searchKwSourceUrl can not be null!");
            return;
        }
        if (!searchKwSourceUrl.matches(SOURCE_URL)) {
            log.error("source url is not match!");
            return;
        }

        // The module name is the path segment before the trailing "/s?...".
        String[] split = searchKwSourceUrl.split("/");
        String jsonSourceUrl = getJsonSourceUrl(split[split.length - 2]);
        if (StringUtils.isBlank(jsonSourceUrl)) {
            // BUG FIX: message previously read "module is not exit!".
            log.error("module does not exist!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");

            try {
                String itemUrl = String.format(jsonSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(requestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(System.currentTimeMillis())
                        // NOTE(review): searchKwSourceUrl is a concrete URL, not a
                        // format template — the format argument is ignored. Confirm.
                        .httpHead("Referer", String.format(searchKwSourceUrl, URLEncoder.encode(keyword, "UTF-8")))
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                turnRecord.tagsCreator().bizTags().addKeywords(keyword);
                crawlerRecords.add(turnRecord);
            } catch (UnsupportedEncodingException e) {
                // BUG FIX: previously logged only e.getMessage(), losing the stack trace.
                log.error("encode keyword [" + keyword + "] failed", e);
            }
        }
    }

    /**
     * Maps a site module name (taken from the source URL path) to the JSON search
     * API template for that module.
     *
     * <p>Only the "news" module is wired up so far; every other known module
     * (web, quotation, Ann, Yanbao, CArticle, TieZi, QA, Blog, ZuHe, CAccount,
     * baike) is a placeholder and yields an empty string, as does any unknown
     * module name.
     *
     * @param module module segment extracted from the search source URL
     * @return the JSON API URL template with a {@code %s} keyword slot, or "" if unmapped
     */
    private static String getJsonSourceUrl(String module) {
        if ("news".equals(module)) {
            return "http://searchapi.eastmoney.com/bussiness/Web/GetCMSSearchList?type=8196&pageindex=1&pagesize=10&keyword=%s&name=zixun";
        }
        return "";
    }

    /**
     * Builds the URL for the next result page by re-assembling {@code requestUrl}'s
     * query string: the parameter named {@code page} has its numeric value
     * incremented by one and the parameter named {@code keyword} is re-URL-encoded;
     * all other parameters are copied through unchanged.
     *
     * @param requestUrl current request URL (may lack a query string)
     * @param keyword    name of the keyword query parameter, may be blank
     * @param page       name of the page-index query parameter, may be blank
     * @return the next-page URL
     */
    private String getNextUrl(String requestUrl, String keyword, String page) {
        String[] parts = requestUrl.split("\\?");
        if (parts.length < 2) {
            // No query string: nothing to increment (the original threw ArrayIndexOutOfBounds here).
            return requestUrl;
        }
        StringBuilder nextUrl = new StringBuilder(parts[0]).append('?');
        List<NameValuePair> params = URLEncodedUtils.parse(parts[1], Charset.defaultCharset());
        for (NameValuePair pair : params) {
            String name = pair.getName();
            String value = pair.getValue();
            if (StringUtils.isNotBlank(page) && page.equals(name)) {
                // Advance the page index by one.
                nextUrl.append(name).append('=').append(Integer.parseInt(value) + 1).append('&');
            } else if (StringUtils.isNotBlank(keyword) && keyword.equals(name)) {
                String encoded;
                try {
                    encoded = URLEncoder.encode(value, "UTF-8");
                } catch (UnsupportedEncodingException e) {
                    // UTF-8 is always supported; fall back to the raw value rather than
                    // silently dropping the keyword parameter as the original did.
                    log.error("failed to URL-encode keyword value [{}]", value, e);
                    encoded = value;
                }
                nextUrl.append(name).append('=').append(encoded).append('&');
            } else {
                nextUrl.append(name).append('=').append(value).append('&');
            }
        }
        // Strip the trailing '&' (or the '?' when the query string was empty).
        return nextUrl.substring(0, nextUrl.length() - 1);
    }

    /**
     * Parses the query string of {@code requestUrl} into a name → value map.
     * Duplicate parameter names keep the last occurrence.
     *
     * @param requestUrl URL whose query string is parsed
     * @return query parameters as a map; empty when the URL carries no query string
     */
    private Map<String, String> getMapByUrl(String requestUrl) {
        Map<String, String> map = new HashMap<>();
        String[] parts = requestUrl.split("\\?");
        if (parts.length < 2) {
            // No query string: return empty (the original threw ArrayIndexOutOfBounds here).
            return map;
        }
        for (NameValuePair pair : URLEncodedUtils.parse(parts[1], Charset.defaultCharset())) {
            map.put(pair.getName(), pair.getValue());
        }
        return map;
    }

    /**
     * Repeatedly HTML-unescapes {@code content} until it stops changing, so that
     * doubly-escaped entities such as {@code &amp;rdquo;} are fully resolved.
     * Capped at six passes to guard against pathological input.
     *
     * <p>Bug fix: the original loop condition tested the immutable {@code content}
     * instead of the evolving {@code result}, so it always ran the maximum number
     * of passes even after the text was fully unescaped.
     *
     * @param content possibly (multiply) HTML-escaped text; blank input yields ""
     * @return the fully unescaped string
     */
    private static String getContentNoEs(String content) {
        if (StringUtils.isBlank(content)) {
            return "";
        }

        String result = content;
        for (int pass = 0; pass <= 5; pass++) {
            // Only text that still looks like it holds an entity needs another pass.
            if (!(result.contains("&") && result.contains(";"))) {
                break;
            }
            String unescaped = StringEscapeUtils.unescapeHtml(result);
            if (unescaped.equals(result)) {
                // Stable: no more entities to resolve.
                break;
            }
            result = unescaped;
        }
        return result;
    }

    /**
     * Returns the first substring of {@code input} matching {@code regx},
     * or {@code null} when there is no match.
     *
     * @param regx  regular expression to search for
     * @param input text to search
     * @return first match (group 0), or {@code null} if none
     */
    private static String getString(String regx, String input) {
        // `if` replaces the original `while`, whose body returned on its first iteration anyway.
        Matcher matcher = Pattern.compile(regx).matcher(input);
        if (matcher.find()) {
            return matcher.group(0);
        }
        return null;
    }

    // Ad-hoc manual check: verify that a doubly-escaped entity is fully unescaped.
    public static void main(String[] args) {
        System.out.println(getContentNoEs("&amp;rdquo;"));
//        String news = getJsonSourceUrl("news");
//        System.out.println(news);
    }
}
