package com.chance.cc.crawler.development.scripts.dahe;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/3/12 11:12
 * @Description 大河网 (Dahe News, dahe.cn) crawler script: search entry, news/forum articles, interactions and comments
 **/
public class DaHeCrawlerScript extends CrawlerCommonScript {

    // final: the logger reference is never reassigned.
    private static final Logger log = LoggerFactory.getLogger(DaHeCrawlerScript.class);
    // Script domain id; also used as the prefix of generated data ids.
    private static final String DOMAIN = "dahe";
    // Expected value of the "site" business tag (checked in crawlerCheck).
    private static final String SITE = "searchKw";
    // Business-tag key counting download retries for a request.
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";
    // Extras key holding the search-URL template (one %s slot for the keyword).
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";

    // URL prefixes and request-routing regular expressions for the Dahe news
    // site (s.dahe.cn), article hosts (*.dahe.cn), forum (bbs.dahe.cn) and the
    // comment service (id.dahe.cn). NEWS_PRIFIX / FORUM_PRIFIX keep their
    // original spelling: renaming them would be a wider refactor than this block.
    private static final String NEWS_PRIFIX = "https://s.dahe.cn";
    private static final String ENTRANCE_URL = "https://s.dahe.cn/";
    private static final String SEARCH_URL = "https://s.dahe.cn/\\?kw=\\S*&from=\\S*";
    private static final String ITEM_NEWS_URL = "https://[a-z]*.dahe.cn/\\d{4}/\\d+-\\d+/\\d+.html";
    private static final String FORUM_PRIFIX = "http://bbs.dahe.cn/";
    private static final String ITEM_FORUM_URL = "http://bbs.dahe.cn/thread-\\d+-1-1.html";
    private static final String COMMENT_FORUM_URL = "http://bbs.dahe.cn/thread-\\d+-\\d+-1.html#comment";
    // Template for the comment-service JSONP endpoint: %s = encoded article URL,
    // article id, encoded title.
    private static final String COMMENT_NEWS_SOURCE_URL = "https://id.dahe.cn/dahe/service/comment/history?page=1&length=200&newsUrl=%s&newsId=%s&title=%s";
    private static final String COMMENT_NEWS_URL = "https://id.dahe.cn/dahe/service/comment/history\\S*";


    /**
     * Returns the domain identifier this script is registered under.
     *
     * @return the constant domain name {@code "dahe"}
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers every URL pattern that routes a request into this script:
     * the site entrance, search pages, news/forum item pages and both
     * comment-page patterns.
     */
    @Override
    public void initUrlRegulars() {
        String[] patterns = {
                ENTRANCE_URL,
                SEARCH_URL,
                ITEM_NEWS_URL,
                ITEM_FORUM_URL,
                COMMENT_FORUM_URL,
                COMMENT_NEWS_URL,
        };
        for (String pattern : patterns) {
            addUrlRegular(pattern);
        }
    }

    /**
     * Input gate: only records whose "site" business tag equals {@link #SITE}
     * are processed by this script.
     *
     * @param crawlerRequestRecord incoming request record
     * @return true when the record targets the searchKw site
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String siteTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        // Constant-first comparison: the original siteTag.equals(SITE) threw a
        // NullPointerException when the "site" tag was absent.
        return SITE.equals(siteTag);
    }

    /**
     * Builds the initial search requests from internally downloaded keyword
     * support records. Only support records whose URL contains "keys" carry a
     * keyword payload.
     *
     * @param requestRecord        the driving request record
     * @param supportSourceRecords internally downloaded support records
     * @return search requests, one per keyword found
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> records = new ArrayList<>();
        for (CrawlerRequestRecord support : supportSourceRecords) {
            String supportUrl = support.getHttpRequest().getUrl();
            if (!supportUrl.contains("keys")) {
                continue;
            }
            initKeyword(requestRecord, support, records);
        }
        return records;
    }

    /**
     * Dispatches a downloaded page to the matching link-extraction handler
     * based on which URL pattern the request URL matches. Failed downloads are
     * re-queued via {@link #requestAgainCrawlerRecord}.
     *
     * @param crawlerRequestRecord the record that produced the page
     * @param httpPage             the downloaded page
     * @return follow-up requests extracted from the page
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> links = new ArrayList<>();

        if (!httpPage.isDownloadSuccess()) {
            log.error(DOMAIN + " page download error!will retry");
            requestAgainCrawlerRecord(links, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return links;
        }

        // The patterns are checked independently; in practice a URL matches at
        // most one of them (different hosts / "#comment" suffix).
        String url = httpPage.getRequest().getUrl();
        if (url.matches(SEARCH_URL)) {
            searchUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(ITEM_NEWS_URL)) {
            itemNewsUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(ITEM_FORUM_URL)) {
            itemForumUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(COMMENT_NEWS_URL)) {
            commentNewsUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(COMMENT_FORUM_URL)) {
            commentForumUrlRecord(crawlerRequestRecord, httpPage, links);
        }

        return links;
    }

    /**
     * Handles a search-result page: emits a turn-page request for the next
     * result page (when present) and one item request per search hit that
     * carries a parsable release date.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded search page
     * @param parsedLinks          output list receiving the new requests
     */
    private void searchUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        String keyword = (String) httpPage.getRequest().getExtras().get("keyword");
        // On the first page only: bail out when the site reports
        // "no content matching <keyword>".
        if (!requestUrl.contains("page")) {
            String result = washContent("找不到和 " + keyword + " 相符的内容或信息", httpPage.getRawText());
            if (StringUtils.isNotBlank(result)) {
                log.error(result);
                return;
            }
        }
        // Pagination: follow the "下一页" (next page) link when present.
        String nextPage = httpPage.getHtml().xpath("//span[text()='下一页']/parent::a/@href").get();
        if (StringUtils.isNotBlank(nextPage)) {
            nextPage = NEWS_PRIFIX + StringEscapeUtils.unescapeHtml(nextPage);
            CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPage)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            turnRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
            parsedLinks.add(turnRecord);
        }

        // Result list: one item request per hit.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"result\"]/div").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath(".//a[@class=\"result-item-title-title\"]/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }

            String releaseTime = node.xpath("./div[@class=\"result-item-tips\"]/text()").get();
            // Simplified from the redundant isBlank || (isNotBlank && !contains).
            if (StringUtils.isBlank(releaseTime) || !releaseTime.contains("发布时间：")) {
                continue;
            }
            String[] parts = releaseTime.split("发布时间：");
            if (parts.length < 2) {
                // Tip text ends right after the "发布时间：" label; the original
                // indexed [1] unconditionally and threw AIOOBE here.
                continue;
            }
            releaseTime = parts[1].trim();

            try {
                long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        // Keyword is part of the dedup key so the same article
                        // can be collected once per keyword.
                        .recordKey(itemUrl + keyword)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }
    }

    /**
     * Handles a news article page. Builds the comment-service URL for the
     * article and, depending on the requested data types, emits an internal
     * download request for the interaction (comment count) and/or a comment
     * request.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded article page
     * @param parsedLinks          output list receiving the new requests
     */
    private void itemNewsUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        String url = "";

        // Fix: fetch the raw <title> and validate it BEFORE URL-encoding it.
        // The original encoded first, so a page without a <title> (null xpath
        // result) crashed URLEncoder.encode with a NullPointerException.
        String rawTitle = httpPage.getHtml().xpath("//title/text()").get();
        if (StringUtils.isBlank(rawTitle)) {
            crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().removeLabelTag("comment");
            crawlerRequestRecord.getHttpRequest().addExtra("comment", "0");
            return;
        }

        try {
            String newsUrl = URLEncoder.encode(requestUrl, "UTF-8");
            // News id is the URL's file name, e.g. ".../123456.html" -> "123456".
            String newsId = requestUrl.substring(requestUrl.lastIndexOf("/") + 1, requestUrl.lastIndexOf("."));
            String newsTitle = URLEncoder.encode(rawTitle, "UTF-8");
            url = String.format(COMMENT_NEWS_SOURCE_URL, newsUrl, newsId, newsTitle);
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is always supported; kept only for the checked signature.
            log.error(e.getMessage());
        }

        if (resultTags.hasDataType(interaction)) {
            // Comment count is fetched via an internal download and merged in
            // afterInternalDownload; the response is neither washed nor parsed.
            CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(url)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .needWashed(false)
                    .needParsed(false)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            parsedLinks.add(interactionRecord);
        }

        if (resultTags.hasDataType(comment)) {
            resultTags.getCategoryTag().removeLabelTag("comment");

            CrawlerRequestRecord filterRecord = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class);
            if (filterRecord == null) {
                log.error("filter record can not null !");
                return;
            }

            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(url)
                    .recordKey(url + "#comment")
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .needWashed(true)
                    .resultLabelTag(comment)
                    .build();
            commentRecord.setFilter(filterRecord.getFilter());
            commentRecord.setFilterInfos(filterRecord.getFilterInfos());
            commentRecord.getHttpRequest().addExtra("articleUrl", requestUrl);
            parsedLinks.add(commentRecord);
        }
    }


    /**
     * Handles a forum thread page. When comments are requested and the thread
     * has at least one reply, emits a request for the last comment page
     * (comments are walked backwards from the newest page).
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded thread page
     * @param parsedLinks          output list receiving the new requests
     */
    private void itemForumUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (!resultTags.hasDataType(comment)) {
            return;
        }
        resultTags.getCategoryTag().removeLabelTag("comment");

        CrawlerRequestRecord filterRecord = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class);
        if (filterRecord == null) {
            log.error("filter record can not null !");
            return;
        }

        // Reply count shown next to the "回复:" label; only proceed when > 0.
        String replyCount = httpPage.getHtml().xpath("//span[text()='回复:']/following-sibling::span[1]/text()").get();
        if (StringUtils.isBlank(replyCount) || Integer.parseInt(replyCount) <= 0) {
            return;
        }

        // Start from the last comment page when a "last" link exists;
        // otherwise the thread fits on a single page.
        String commentUrl = httpPage.getHtml().xpath("//a[@class=\"last\"]/@href").get();
        if (StringUtils.isBlank(commentUrl)) {
            commentUrl = requestUrl + "#comment";
        } else {
            commentUrl = FORUM_PRIFIX + StringEscapeUtils.unescapeHtml(commentUrl) + "#comment";
        }

        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .needWashed(true)
                .resultLabelTag(comment)
                .build();
        commentRecord.setFilter(filterRecord.getFilter());
        commentRecord.setFilterInfos(filterRecord.getFilterInfos());
        commentRecord.getHttpRequest().addExtra("articleUrl", requestUrl);
        parsedLinks.add(commentRecord);
    }

    /**
     * Comment pagination for the news comment service: rebuilds the request
     * URL with the "page" parameter incremented by one and re-encodes every
     * other query parameter unchanged.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded comment-service page
     * @param parsedLinks          output list receiving the next-page request
     */
    private void commentNewsUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        String[] split = requestUrl.split("\\?");
        if (split.length < 2) {
            // COMMENT_NEWS_URL can match a URL without a query string; the
            // original indexed split[1] unconditionally and threw AIOOBE.
            log.error("comment url without query string: {}", requestUrl);
            return;
        }

        StringBuilder nextUrl = new StringBuilder(split[0]).append("?");
        List<NameValuePair> params = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair pair : params) {
            String name = pair.getName();
            String value = pair.getValue();
            if ("page".equals(name)) {
                nextUrl.append(name).append("=").append(Integer.parseInt(value) + 1).append("&");
            } else {
                try {
                    // Encode first so a (theoretical) failure skips the whole
                    // pair, matching the original concatenation behavior.
                    String encoded = URLEncoder.encode(value, "UTF-8");
                    nextUrl.append(name).append("=").append(encoded).append("&");
                } catch (UnsupportedEncodingException e) {
                    log.error(e.getMessage());
                }
            }
        }
        // Drop the trailing "&" (or the "?" when there were no parameters).
        String next = nextUrl.substring(0, nextUrl.length() - 1);

        // NOTE(review): there is no stop condition here — every comment page
        // enqueues the next one; downstream filtering/dedup is assumed to
        // terminate the chain. TODO confirm.
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(next)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();
        commentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(commentRecord);
    }

    /**
     * Comment pagination on forum threads: comments are walked from the last
     * page towards the first via the "prev" link. The timestamp of the first
     * post on the current page is propagated as the release time of the
     * generated request.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded comment page
     * @param parsedLinks          output list receiving the previous-page request
     */
    private void commentForumUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String prevHref = httpPage.getHtml().xpath("//a[@class=\"prev\"]/@href").get();
        if (StringUtils.isBlank(prevHref)) {
            // First page reached; nothing further to turn.
            return;
        }
        String prevUrl = FORUM_PRIFIX + StringEscapeUtils.unescapeHtml(prevHref) + "#comment";

        String lastReleaseTime = httpPage.getHtml().xpath("//div[@id=\"postlist\"]/div[contains(@id,'post_')][1]//div[@class=\"authi\"]/em/text()").get();
        try {
            long postTime = DateUtils.parseDate(lastReleaseTime, "发表于 yyyy-MM-dd HH:mm:ss").getTime();
            CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(prevUrl)
                    .releaseTime(postTime)
                    .copyBizTags()
                    .needWashed(true)
                    .copyResultTags()
                    .build();
            turnRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
            parsedLinks.add(turnRecord);
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
    }

    /**
     * After the comment-count endpoint has been internally downloaded, strips
     * the JSONP padding and copies the count into the request extras under
     * "comments", falling back to "0" on any parse failure.
     *
     * @param crawlerRecord           the article request being enriched
     * @param internalDownloadRecords internally downloaded support pages
     * @param links                   not used by this script
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        for (CrawlerRequestRecord record : internalDownloadRecords) {
            HttpPage page = record.getInternalDownloadPage();
            String body = page.getRawText();
            try {
                // The response is JSONP: "<callback>({...})" — everything
                // before the first "(" is the padding to strip.
                String padding = body.substring(0, body.indexOf("("));
                String count = page.getJson().removePadding(padding).jsonPath($_type + ".obj.count").get();
                crawlerRecord.getHttpRequest().addExtra("comments", count);
            } catch (Exception e) {
                // Best-effort enrichment: default to "0" comments on failure.
                log.error(e.getMessage());
                crawlerRecord.getHttpRequest().addExtra("comments", "0");
            }
        }
    }

    /**
     * Routes a downloaded page to the matching wash routine for every data
     * type requested on the record (article / interaction / comment, for both
     * the news site and the forum).
     *
     * @param crawlerRecord record that produced the page
     * @param page          downloaded page
     * @return washed data items (possibly empty)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> results = new ArrayList<>();
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();
        String url = page.getRequest().getUrl();

        boolean isNewsItem = url.matches(ITEM_NEWS_URL);
        boolean isForumItem = url.matches(ITEM_FORUM_URL);

        if (resultTags.hasDataType(article)) {
            if (isNewsItem) {
                results.add(washNewsArticle(crawlerRecord, page));
            }
            if (isForumItem) {
                results.add(washForumArticle(crawlerRecord, page));
            }
        }

        if (resultTags.hasDataType(interaction)) {
            if (isNewsItem) {
                results.add(washNewsInteraction(crawlerRecord, page));
            }
            if (isForumItem) {
                results.add(washForumInteraction(crawlerRecord, page));
            }
        }

        if (resultTags.hasDataType(comment)) {
            if (url.matches(COMMENT_NEWS_URL)) {
                results.addAll(washNewsComment(crawlerRecord, page));
            }
            if (url.matches(COMMENT_FORUM_URL)) {
                results.addAll(washForumComment(crawlerRecord, page));
            }
        }

        return results;
    }

    /**
     * Washes a news article page into an article CrawlerData item.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded article page
     * @return the washed article, or null when the release time cannot be parsed
     */
    private CrawlerData washNewsArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // Article key is the URL's file name, e.g. ".../123456.html" -> "123456".
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        String title = httpPage.getHtml().xpath("//h1[@id=\"4g_title\"]/text()|//h2[@id=\"4g_title\"]/text()").get();
        String source = httpPage.getHtml().xpath("//p[@id=\"source_baidu\"]/text()|//span[@id=\"source_baidu\"]/text()").get();
        // Strip the "来源：" (source:) label; the split is now guarded so a bare
        // label no longer raises ArrayIndexOutOfBoundsException.
        if (StringUtils.isNotEmpty(source) && source.contains("来源：")) {
            String[] parts = source.split("：");
            if (parts.length > 1) {
                source = parts[1];
            }
        }
        String releaseTime = httpPage.getHtml().xpath("//p[@id=\"pubtime_baidu\"]/text()|//span[@id=\"pubtime_baidu\"]/text()|//i[@id=\"pubtime_baidu\"]/text()").get();
        List<String> textNodes = httpPage.getHtml().xpath("//div[@id=\"mainCon\"]//text()").all();
        // StringBuilder: local, single-threaded use needs no synchronization.
        StringBuilder contents = new StringBuilder();
        for (String text : textNodes) {
            contents.append(text).append(" ");
        }

        CrawlerData crawlerData = null;
        try {
            long releaseTimeToLong = StringUtils.isNotBlank(releaseTime) ?
                    DateUtils.parseDate(releaseTime, "yyyy年MM月dd日HH:mm", "yyyy-MM-dd HH:mm").getTime()
                    : crawlerRequestRecord.getReleaseTime();
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    // Statically imported enum constant instead of valueOf("article").
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_Content, contents.toString())
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Source, source)
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return crawlerData;
    }

    /**
     * Washes the comment-count interaction data for a news article. The count
     * itself was stored under the "comments" extra by
     * {@link #afterInternalDownload}.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded article page
     * @return the washed interaction item, or null when the release time cannot be parsed
     */
    private CrawlerData washNewsInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // Article key is the URL's file name, e.g. ".../123456.html" -> "123456".
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        String releaseTime = httpPage.getHtml().xpath("//p[@id=\"pubtime_baidu\"]/text()|//span[@id=\"pubtime_baidu\"]/text()|//i[@id=\"pubtime_baidu\"]/text()").get();

        CrawlerData crawlerData = null;
        try {
            long releaseTimeToLong = StringUtils.isNotBlank(releaseTime) ?
                    DateUtils.parseDate(releaseTime, "yyyy年MM月dd日HH:mm", "yyyy-MM-dd HH:mm").getTime()
                    : crawlerRequestRecord.getReleaseTime();
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                    // Statically imported enum constant instead of valueOf("interaction").
                    .resultLabelTag(interaction)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_I_Comments, (String) httpPage.getRequest().getExtras().get("comments"))
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return crawlerData;
    }

    /**
     * Washes a forum thread's opening post into an article CrawlerData item.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded thread page
     * @return the washed article, or null when the release time cannot be parsed
     */
    private CrawlerData washForumArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // "http://bbs.dahe.cn/thread-<id>-1-1.html" -> thread id.
        String articleKey = itemUrl.split("-")[1];

        String title = httpPage.getHtml().xpath("//span[@id=\"thread_subject\"]/text()").get();
        String author = httpPage.getHtml().xpath("//div[contains(@id,'post_')][1]//div[@class=\"authi\"]/a/text()").get();
        String releaseTime = httpPage.getHtml().xpath("//div[contains(@id,'post_')][1]//div[@class=\"authi\"]/em/text()").get();
        // When the <em> only holds the "发表于 " (posted at) label, the actual
        // timestamp lives in the nested <span>'s title attribute.
        releaseTime = "发表于 ".equals(releaseTime) ? httpPage.getHtml().xpath("//div[@id=\"postlist\"]/div[contains(@id,'post_')][1]//div[@class=\"authi\"]/em/span/@title").get() : releaseTime;
        List<String> textNodes = httpPage.getHtml().xpath("//div[contains(@id,'post_')][1]//div[@class=\"pct\"]//td[contains(@id,'postmessage_')]//text()").all();
        // StringBuilder: local, single-threaded use needs no synchronization.
        StringBuilder contents = new StringBuilder();
        for (String text : textNodes) {
            contents.append(text).append(" ");
        }

        CrawlerData crawlerData = null;
        try {
            long releaseTimeToLong = StringUtils.isNotBlank(releaseTime) ?
                    DateUtils.parseDate(releaseTime, "发表于 yyyy-MM-dd HH:mm:ss", "yyyy-MM-dd HH:mm:ss").getTime()
                    : crawlerRequestRecord.getReleaseTime();
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    // Statically imported enum constant instead of valueOf("article").
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_Content, contents.toString())
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return crawlerData;
    }

    /**
     * Washes a forum thread's view and reply counters into an interaction
     * CrawlerData item.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded thread page
     * @return the washed interaction item, or null when the release time cannot be parsed
     */
    public CrawlerData washForumInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // "http://bbs.dahe.cn/thread-<id>-1-1.html" -> thread id.
        String articleKey = itemUrl.split("-")[1];

        // Missing counters default to "0".
        String views = httpPage.getHtml().xpath("//span[text()='查看:']/following-sibling::span[1]/text()").get();
        views = StringUtils.isNotBlank(views) ? views : "0";
        String comments = httpPage.getHtml().xpath("//span[text()='回复:']/following-sibling::span[1]/text()").get();
        comments = StringUtils.isNotBlank(comments) ? comments : "0";
        String releaseTime = httpPage.getHtml().xpath("//div[contains(@id,'post_')][1]//div[@class=\"authi\"]/em/text()").get();
        // When the <em> only holds the "发表于 " (posted at) label, the actual
        // timestamp lives in the nested <span>'s title attribute.
        releaseTime = "发表于 ".equals(releaseTime) ? httpPage.getHtml().xpath("//div[@id=\"postlist\"]/div[contains(@id,'post_')][1]//div[@class=\"authi\"]/em/span/@title").get() : releaseTime;
        try {
            long releaseTimeToLong = StringUtils.isNotBlank(releaseTime) ?
                    DateUtils.parseDate(releaseTime, "发表于 yyyy-MM-dd HH:mm:ss", "yyyy-MM-dd HH:mm:ss").getTime()
                    : crawlerRequestRecord.getReleaseTime();
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                    // Statically imported enum constant instead of valueOf("interaction").
                    .resultLabelTag(interaction)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_I_Comments, comments)
                    .addContentKV(Field_I_Views, views)
                    .build();

            return crawlerData;
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return null;

    }

    /**
     * Washes the comment floors of a forum thread page into CrawlerData items.
     * Posts are iterated bottom-up; the thread starter's post (marked with the
     * div[@id="fj"] label) is skipped.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage             downloaded comment page
     * @return washed comments (possibly empty)
     */
    public List<CrawlerData> washForumComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        // "http://bbs.dahe.cn/thread-<id>-..." -> thread id.
        String articleKey = articleUrl.split("-")[1];

        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@id=\"postlist\"]/div[contains(@id,'post_')]").nodes();
        for (int i = nodes.size() - 1; i >= 0; i--) {
            Selectable node = nodes.get(i);
            String isCrawler = node.xpath(".//td[@class=\"plc\"]//div[@class=\"pi\"]/div[@id=\"fj\"]/label").get();
            if (StringUtils.isNotBlank(isCrawler)) {
                // Content posted by the thread starter, not a comment.
                continue;
            }

            String author = node.xpath(".//a[@class=\"xw1\"]/text()").get();
            List<String> textNodes = node.xpath(".//div[@class=\"pct\"]//td[contains(@id,'postmessage_')]//text()").all();
            StringBuilder contents = new StringBuilder();
            for (String text : textNodes) {
                if (StringUtils.isBlank(text)) {
                    continue;
                }
                contents.append(text).append(" ");
            }
            String releaseTime = node.xpath(".//div[@class=\"authi\"]/em/text()").get();
            // Fix: the fallback timestamp lookup must be scoped to the current
            // post node. The original queried httpPage.getHtml() with a
            // relative path, which resolved against the whole document and
            // returned the first post's timestamp for every comment.
            releaseTime = "发表于 ".equals(releaseTime) ? node.xpath(".//div[@class=\"authi\"]/em/span/@title").get() : releaseTime;

            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }
            String floor = node.xpath(".//td[@class=\"plc\"]//div[@class=\"pi\"]/strong/a/text()").get();
            if (StringUtils.isNotBlank(floor)) {
                // Discuz!-style floor nicknames: 沙发=2nd, 板凳=3rd, 地板=4th.
                switch (floor) {
                    case "沙发":
                        floor = "2";
                        break;
                    case "板凳":
                        floor = "3";
                        break;
                    case "地板":
                        floor = "4";
                        break;
                    default:
                        break;
                }
            } else {
                floor = node.xpath(".//td[@class=\"plc\"]//div[@class=\"pi\"]/strong/a/em/text()").get();
            }
            String commentId = node.xpath("./@id").get();
            try {
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                        // Statically imported enum constant instead of valueOf("comment").
                        .resultLabelTag(comment)
                        .releaseTime(DateUtils.parseDate(releaseTime.trim(), "发表于 yyyy-MM-dd HH:mm:ss", "yyyy-MM-dd HH:mm:ss").getTime())
                        .url(requestUrl)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Content, contents.toString())
                        .addContentKV(Field_Floor, floor)
                        .build();
                crawlerDataList.add(crawlerData);
            } catch (Exception e) {
                log.error(e.getMessage());
            }
        }
        return crawlerDataList;
    }

    /**
     * Intended to wash news-article comments from the JSONP comment-service
     * response. Currently a stub: the payload is parsed but no CrawlerData is
     * built, so this method always returns an empty list.
     * NOTE(review): complete the mapping to CrawlerData once article comments
     * exist on the site.
     */
    private List<CrawlerData> washNewsComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        // Articles currently have no comments, so washing cannot be implemented yet.
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1, articleUrl.lastIndexOf("."));

        // The response is JSONP: everything before the first "(" is the padding.
        String rawText = httpPage.getRawText();
        String prefix = rawText.substring(0, rawText.indexOf("("));
        String data = httpPage.getJson().removePadding(prefix).jsonPath($_type + ".obj.data").all().get(0);
        List<String> commentList = JSONArray.parseArray(data, String.class);
        for (String s : commentList) {
            // Parsed but unused — mapping to CrawlerData not yet implemented.
            JSONObject jsonObject = JSONObject.parseObject(s);
        }
        return crawlerDataList;
    }

    /**
     * Returns the first match of {@code regx} in {@code input}, or null when
     * there is no match. Note the pattern is treated as a regular expression,
     * so metacharacters in caller-supplied text (e.g. search keywords) are
     * interpreted — callers passing literal text should be aware of this.
     *
     * @param regx  regular expression to search for
     * @param input text to search
     * @return the first matched substring, or null
     */
    private static String washContent(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        // A plain if suffices: the original while-loop returned on the first
        // iteration anyway.
        return matcher.find() ? matcher.group(0) : null;
    }

    /**
     * Post-execution hook from the framework; intentionally left empty for
     * this script.
     *
     * @param crawlerRecordContext execution context (unused)
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-enqueues a failed download, up to 5 attempts. The retry count is kept
     * in the business tags under {@link #REQUEST_AGAIN_TAG}; the record key is
     * suffixed with the count so the retry is not deduplicated away.
     *
     * @param crawlerRequestRecords output list receiving the retry request
     * @param crawlerRecord         the failed request
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 5) {
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // Preserve the original request type: records without the
        // turn_page_item_request label are re-issued as turn-page requests,
        // the rest as item-page requests.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // (Removed dead null-check: both branches above always assign a record.)
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Parses the keyword-list JSON support page and emits one search request
     * per keyword, formatted from the searchKwSourceUrl template stored in the
     * driving record's extras.
     *
     * @param requestRecord       the driving request record
     * @param supportSourceRecord internally downloaded keyword page
     * @param crawlerRecords      output list receiving the search requests
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage keywordPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        String searchSourceUrl = (String) extras.get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchSourceUrl)) {
            log.error("search kw source url can not null!");
            return;
        }

        Json json = keywordPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", keywordPage.getRequest().getUrl());
            return;
        }

        for (String entry : json.jsonPath($_type + ".content").all()) {
            String keyword = JSONObject.parseObject(entry).getString("keyword");
            try {
                String searchUrl = String.format(searchSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord searchRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(requestRecord)
                        .httpUrl(searchUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                searchRecord.tagsCreator().bizTags().addKeywords(keyword);
                searchRecord.getHttpRequest().addExtra("keyword", keyword);
                crawlerRecords.add(searchRecord);
            } catch (UnsupportedEncodingException e) {
                log.error(e.getMessage());
            }
        }
    }

    /**
     * Ad-hoc manual check: prints the URL-encoded form of a sample article
     * title. Not used by the crawler framework. (Removed the commented-out
     * URLDecoder experiment.)
     */
    public static void main(String[] args) {
        String s = "【大河网景】2021年春运后铁路迎“全面诊疗”-大河网";
        try {
            System.out.println(URLEncoder.encode(s, "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is always supported; log instead of printStackTrace.
            log.error(e.getMessage(), e);
        }
    }
}
