package com.chance.cc.crawler.development.scripts.eastMoney;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.time.LocalDate;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2020/11/25 16:30
 * @Description
 *      East Money (eastmoney.com) section crawler: parses section/module
 *      pages, article lists, article pages, blog pages and the
 *      interaction/comment JSON APIs.
 **/
public class EastMoneyCrawlerScript extends CrawlerCommonScript {

    // static final: one logger per class rather than one per script instance.
    private static final Logger log = LoggerFactory.getLogger(EastMoneyCrawlerScript.class);

    public static final String prefixUrl = "(https|http)://[a-zA-Z]*.eastmoney.com/";
    public static final String modelArticlePrefixUrl = "(http|https)://[a-zA-Z]*.eastmoney.com/a/";
    public static final String eastmoneyEntranceUrl = "https://www.eastmoney.com/";//East Money site entrance
    public static final String modelEntranceUrl = "http\\S*://[a-zA-Z]*.eastmoney.com/";//section entrance URL
    public static final String modelUrl = "http\\S*://[a-zA-Z]*.eastmoney.com/[a-zA-Z0-9]*.html";//section pages that need parsing
    public static final String modelArticleEntranceUrl = "http\\S*://[a-zA-Z]*.eastmoney.com/a/[a-zA-Z]*.html|http\\S*://[a-zA-Z]*.eastmoney.com/a/[a-zA-Z]*_\\d+.html";//article-list entrance URL
    public static final String renwuEntranceUrl = "http\\S*://renwu.eastmoney.com/List/\\S*";//"renwu" (people) section URL
    public static final String blogEntranceUrl = "http\\S*://blog.eastmoney.com/[a-zA-Z]*_\\d+.html";//blog section URL
    public static final String jigouEntranceUrl = "http\\S*://\\S*.eastmoney.com/[a-zA-Z]+|http\\S*://\\S*.eastmoney.com/[a-zA-Z]*/\\d+";//"jigou" (institution) section URL
    public static final String articleUrl = "http\\S*://[a-zA-Z]*.eastmoney.com/a/\\d+.html";//article URL
    public static final String blogArticleUrl = "http://blog.eastmoney.com/\\S*/blog_\\d+.html";//blog article URL
    public static final String interactionUrl = "http://gbapi.eastmoney.com/abstract/api/PostShort/ArticleBriefInfo\\S*";//interaction-count API URL
    public static final String commentUrl = "http://[a-zA-Z]*.eastmoney.com/interface/GetData.aspx";//comment API URL


    // Sub-domains (sections) this script knows how to parse.
    public static final List<String> modelList = Arrays.asList("finance","stock","option","hk","forex","futures","gold","money",
            "bank","bond","insurance","trust","biz","enterprise","auto","renwu","blog","jigou","mingjia");
    //Sections not implemented yet: "topic", "global" (lazily loaded), "media" (lazily loaded), "guba" (pending), "dougu" (completely different layout, no timestamps), "roadshow" (live streams), "caipiao" (lottery), "so" (search)
    /**
     * Domain identifier under which this script is registered.
     *
     * @return the constant domain name {@code "eastmoney"}
     */
    @Override
    public String domain() {
        final String domainName = "eastmoney";
        return domainName;
    }

    /**
     * Registers every URL pattern that routes a request into this script.
     * Patterns are registered in the same order as before.
     */
    @Override
    public void initUrlRegulars() {
        List<String> regulars = Arrays.asList(
                eastmoneyEntranceUrl,
                modelEntranceUrl,
                modelUrl,
                modelArticleEntranceUrl,
                renwuEntranceUrl,
                blogEntranceUrl,
                jigouEntranceUrl,
                articleUrl,
                blogArticleUrl,
                interactionUrl,
                commentUrl);
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input gate: only records whose biz category tag {@code "site"} equals
     * {@code "module"} are processed by this script.
     *
     * @param crawlerRequestRecord incoming crawl record
     * @return true when this script should handle the record
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String siteTag = crawlerRequestRecord.tagsCreator()
                .bizTags()
                .getCategoryTag()
                .getKVTagStrVal("site");
        return Objects.equals("module", siteTag);
    }


    /**
     * Link-parsing entry point. The branch taken is decided by matching the
     * request URL against the section patterns: site entrance -> section
     * entrances -> article lists -> article pages -> interaction / comment
     * API requests.
     *
     * @param crawlerRequestRecord record that produced this page
     * @param httpPage             downloaded page
     * @return follow-up crawl requests discovered on the page
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<CrawlerRequestRecord>();

        String requestUrl = httpPage.getRequest().getUrl();

        // Sub-domain between "//" and the first "." names the section, e.g. "finance".
        String webSite = requestUrl.substring(requestUrl.indexOf("//") + 2 , requestUrl.indexOf("."));
        Map<String, Object> params = new HashMap<>();
        if(requestUrl.matches(eastmoneyEntranceUrl)){
            // Site entrance: follow the nav links of every supported section.
            List<String> all = httpPage.getHtml().xpath("//div[@class=\"navlist\"]/ul/li/a/@href").all();
            for (String url : all) {
                String webSiteUrl = url.substring(url.indexOf("//") + 2,url.indexOf("."));
                if(url.matches(modelEntranceUrl) && modelList.contains(webSiteUrl)){
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(url)
                            .recordKey(url)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .build();

                    parsedLinks.add(itemRecord);
                }
            }
        } else if (requestUrl.matches(modelEntranceUrl)) {
            // Section entrance: each section renders its menu with a different DOM.
            List<String> all = new LinkedList<>();
            if(webSite.matches("finance|bank|bond|insurance|trust|biz|enterprise|auto")){
                all = httpPage.getHtml().xpath("//div[@class=\"menu_wrap\"]/ul/li/a/@href").all();
            }else if(webSite.matches("money")){
                all = httpPage.getHtml().xpath("//ul[@class=\"navbar navbar-default\"]/li/a/@href").all();
            }else if(webSite.matches("renwu")) {
                all = httpPage.getHtml().xpath("//div[@class=\"menu\"]/ul/li/a/@href").all();
            }else if(webSite.matches("blog")){
                all = httpPage.getHtml().xpath("//ul[@id=\"blog-nav\"]/li/a/@href").all();
            } else if(webSite.matches("jigou|mingjia")){
                all = httpPage.getHtml().xpath("//ul[@class=\"main_bottom_right\"]/li/a/@href").all();
            }else {
                all = httpPage.getHtml().xpath("//div[@class=\"mainMenu\"]/ul/li/a/@href").all();
            }
            for (String url : all) {
                // Absolutize section-relative hrefs before matching them.
                url = (webSite.matches("bank|bond|insurance") && url.matches("/a/\\S*"))||(webSite.matches("renwu") && url.matches("/List/\\S*")) || (webSite.matches("blog") && url.matches("/[a-zA-Z]*_\\d+.html")) || (webSite.matches("jigou|mingjia") && url.matches("/[a-zA-Z]*"))
                        ? getString(prefixUrl,requestUrl) + url.substring(1) : url;

                String webSiteUrl = url.matches("http\\S*://[a-zA-Z]*.eastmoney.com/\\S*") ? url.substring(url.indexOf("//") + 2, url.indexOf(".")) : "";
                if(StringUtils.isNotEmpty(webSiteUrl) && webSiteUrl.equals(webSite)){
                    // Normalize list paths to the "/a/" article-list form.
                    url = url.replace("/news/","/a/").replace("/pz/","/a/");

                    if(url.matches(modelArticleEntranceUrl) || url.matches(renwuEntranceUrl) || url.matches(blogEntranceUrl) || url.matches(jigouEntranceUrl) || url.matches(modelUrl)){
                        CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                                .turnPageRequest(crawlerRequestRecord)
                                .httpUrl(url)
                                .recordKey(url)
                                .releaseTime(System.currentTimeMillis())
                                .notFilterRecord()
                                .build();

                        parsedLinks.add(itemRecord);
                    }
                }
            }
        }else if(requestUrl.matches(modelUrl)){
            // Secondary section pages of finance/stock/hk link to more article lists.
            List<String> all = new LinkedList<>();

            switch (webSite){
                case "finance" : all = httpPage.getHtml().xpath("//div[@class=\"item\"]/a/@href|//dd[@class=\"items\"]/a/@href|//div[@class=\"menu_wrap\"]/ul/li/a/@href").all();break;
                case "stock" : all = httpPage.getHtml().xpath("//div[@class=\"mainMenu\"]/ul/li/a/@href|//div[@class=\"menu_wrap\"]/ul/li/a/@href").all();break;
                case "hk" : all = httpPage.getHtml().xpath("//div[@class=\"mainMenu\"]/ul/li/a/@href").all();break;
            }
            for (String itemUrl : all) {
                itemUrl = webSite.equals("finance") && itemUrl.matches("yaowen_\\S*") ? getString(prefixUrl,requestUrl) + itemUrl.replace("yaowen_","a/") : itemUrl;

                String webSiteUrl = itemUrl.matches("http\\S*://[a-zA-Z]*.eastmoney.com/\\S*") ? itemUrl.substring(itemUrl.indexOf("//") + 2, itemUrl.indexOf(".")) : "";
                if(StringUtils.isNotEmpty(webSiteUrl) && webSiteUrl.equals(webSite)){
                    itemUrl  = itemUrl.replace("/news/","/a/");
                    if(itemUrl.matches(modelArticleEntranceUrl)){
                        CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                                .turnPageRequest(crawlerRequestRecord)
                                .httpUrl(itemUrl)
                                .recordKey(itemUrl)
                                .releaseTime(System.currentTimeMillis())
                                .notFilterRecord()
                                .build();

                        parsedLinks.add(itemRecord);
                    }
                }
            }
        }else if (requestUrl.matches(modelArticleEntranceUrl) || requestUrl.matches(renwuEntranceUrl) || requestUrl.matches(jigouEntranceUrl)) {
            // Article list: schedule the next list page, then each article plus
            // its interaction-API request.
            // The xpath engine does not support "//a[contains(text(),'下一页')]/@href",
            // so fetch candidate pager anchors and inspect their text instead.
            List<Selectable> nodes1 = new LinkedList<>();
            if(webSite.matches("jigou|mingjia")){
                nodes1 = httpPage.getHtml().xpath("//div[@class=\"pager\"]/a").nodes() ;
            }else{
                nodes1 = httpPage.getHtml().xpath("//a[@class=\"page-btn\"]").nodes();
            }
            for (Selectable selectable : nodes1) {
                String text = selectable.xpath("./text()").get();
                if ("下一页".equals(text)) {
                    String nextPageUrl = requestUrl.matches(modelArticleEntranceUrl) ? getString(modelArticlePrefixUrl,requestUrl) + selectable.xpath("./@href").get() : getString(prefixUrl,requestUrl) +selectable.xpath("./@href").get().substring(1);

                    CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .recordKey(nextPageUrl)
                            .httpUrl(nextPageUrl)
                            .releaseTime(System.currentTimeMillis())
                            .build();
                    parsedLinks.add(turnPageRequest);
                }
            }

            List<Selectable> nodes = new LinkedList<>();
            if(webSite.matches("renwu")){
                nodes = httpPage.getHtml().xpath("//div[@class=\"artlist\"]/ul/li").nodes();
            }else if(webSite.matches("jigou|mingjia")){
                nodes = httpPage.getHtml().xpath("//ul[@class=\"typeuls\"]/li").nodes();
            }else{
                nodes = httpPage.getHtml().xpath("//ul[@id=\"newsListContent\"]/li").nodes();
            }
            for (Selectable node : nodes) {
                String itemUrl = "";
                if(webSite.matches("renwu")){
                    itemUrl = node.xpath("./a[@title]/@href").get();
                }else if(webSite.matches("jigou|mingjia")){
                    itemUrl =  node.xpath("./a[@class=\"qa\"]/@href").get();
                }else{
                    itemUrl = node.xpath(".//p[@class=\"title\"]/a/@href").get();
                }
                if (StringUtils.isBlank(itemUrl)) {
                    continue;
                }

                // Article key is the file name without extension, e.g. ".../202011251234.html".
                String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));
                String releaseTime = "";
                if(webSite.matches("renwu")){
                    releaseTime = node.xpath("./span/text()").get();
                }else if(webSite.matches("jigou|mingjia")){
                    releaseTime = node.xpath("./div[@class=\"qadiv\"]/text()").get();
                }else{
                    releaseTime = node.xpath(".//p[@class=\"time\"]/text()").get();
                }
                if (StringUtils.isBlank(releaseTime)) {
                    continue;
                }
                if(webSite.matches("renwu|jigou|mingjia")){
                    releaseTime = getString("\\d{4}-\\d{2}-\\d{2}",releaseTime) ;
                }else{
                    // News lists omit the year; recover it from the article-key prefix.
                    String year = articleKey.substring(0, 4);
                    releaseTime = year + "年" + getString("\\d{2}月\\d{2}日 \\d{2}:\\d{2}", releaseTime);
                }

                // Renamed from "interactionUrl" to stop shadowing the class constant.
                String interactionApiUrl = "http://gbapi.eastmoney.com/abstract/api/PostShort/ArticleBriefInfo?plat=web&version=300&product=guba&h=cf67fa64c7b3f5f2c6a0da8248c1248a&postid=" + articleKey + "&type=1";
                try {
                    Long releaseTimeToLong = null;
                    if(webSite.matches("jigou|renwu|mingjia")){
                        releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime();
                    }else{
                        releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy年MM月dd日 HH:mm").getTime();
                    }

                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(itemUrl)
                            .httpUrl(itemUrl)
                            .releaseTime(releaseTimeToLong)
                            .resultLabelTag(article)
                            .build();
                    parsedLinks.add(itemRecord);

                    CrawlerRequestRecord iteractionRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(interactionApiUrl)
                            .httpUrl(interactionApiUrl)
                            .releaseTime(releaseTimeToLong)
                            .needParsed(false)
                            .resultLabelTag(interaction)
                            .build();
                    parsedLinks.add(iteractionRecord);
                } catch (ParseException e) {
                    // Log with cause instead of printStackTrace() so the bad
                    // timestamp stays traceable in the crawler log.
                    log.error("Failed to parse release time: {}", releaseTime, e);
                }
            }
        } else if (requestUrl.matches(articleUrl) || requestUrl.matches(blogArticleUrl)) {
            // Article page: locate the guba/blog discussion link, then build the
            // POST request for the first comment page.
            String url = requestUrl.matches(articleUrl) ? httpPage.getHtml().xpath("//div[@id=\"aboutLeft\"]/a/@href").get() : requestUrl;
            if (StringUtils.isNotEmpty(url)) {
                // Renamed from "commentUrl" to stop shadowing the class constant.
                String commentApiUrl = requestUrl.matches(articleUrl) ? "http://guba.eastmoney.com/interface/GetData.aspx" : "http://blog.eastmoney.com/interface/GetData.aspx";
                // The linked URL reveals whether the article page loaded completely.
                if(url.matches("http://guba.eastmoney.com/news,cjpl,\\d+.html") || url.matches(blogArticleUrl)){
                    // Fully loaded: schedule the comment-API request.
                    String postid = requestUrl.matches(articleUrl) ? url.substring(url.lastIndexOf(",") + 1, url.lastIndexOf(".")) : url.substring(url.lastIndexOf("_") + 1,url.lastIndexOf("."));

                    String host = "";
                    String articleKey = "";
                    if(requestUrl.matches(articleUrl)){
                        host = "guba.eastmoney.com";
                        articleKey = requestUrl.substring(requestUrl.lastIndexOf("/") + 1, requestUrl.lastIndexOf("."));
                        params.put("param", "postid=" + postid + "&sort=1&sorttype=1&p=1&ps=30");
                        params.put("path", "reply/api/Reply/ArticleNewReplyList");
                    }else{
                        // Fixed: the Host header value used to carry a leading
                        // space (" blog.eastmoney.com"), which is not a valid
                        // HTTP Host header value.
                        host = "blog.eastmoney.com";
                        articleKey = requestUrl.substring(requestUrl.lastIndexOf("_") + 1,requestUrl.lastIndexOf("."));
                        params.put("param","postid=" + postid + "&ps=20&p=1&replyid=");
                        params.put("path","reply/api/Reply/ArticleReplyList");
                    }
                    params.put("env", 2);

                    CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(commentApiUrl + postid)
                            .httpUrl(commentApiUrl)
                            .resultLabelTag(comment)
                            .releaseTime(System.currentTimeMillis())
                            .build();

                    HttpRequest httpRequest = commentRecord.getHttpRequest();
                    httpRequest.setMethod("POST");
                    httpRequest.setRequestBody(HttpRequestBody.form(params, "UTF-8"));
                    Map<String, String> headers = new HashMap<>();
                    headers.put("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8");
                    headers.put("Host", host);
                    headers.put("Referer", url);
                    httpRequest.setHeaders(headers);
                    // Carry the POST form plus article identifiers along in extras
                    // so the comment-paging branch can rebuild the next request.
                    Map<String, Object> extras = new HashMap<>();
                    extras.putAll(params);
                    extras.put("postid", postid);
                    extras.put("articleKey",articleKey );
                    extras.put("articleUrl",requestUrl);
                    httpRequest.setExtras(extras);
                    parsedLinks.add(commentRecord);
                }else{
                    // Not fully loaded: schedule the same article page again.
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(requestUrl + "again" )
                            .httpUrl(requestUrl)
                            .releaseTime(crawlerRequestRecord.getReleaseTime())
                            .build();
                    parsedLinks.add(itemRecord);
                }
            }
        } else if (requestUrl.matches(commentUrl)) {
            // Comment API response: page through the comment list.
            Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
            int p = 1;
            String me = httpPage.getJson().jsonPath($_type + ".me").get();
            List<String> reList = httpPage.getJson().jsonPath($_type + ".re").all();
            // Empty "me" or "操作成功" signals there is a next comment page.
            if (StringUtils.isEmpty(me) || "操作成功".equals(me)) {
                // NOTE(review): assumes ".re" is non-empty whenever "me" signals
                // success — an empty list would throw here; confirm with the API.
                JSONObject jsonObject = JSONObject.parseObject(reList.get(reList.size() -1));
                String releaseTime = jsonObject.getString("reply_publish_time");
                // Bump the "p=<n>" page parameter inside the stored form body.
                String param = (String) extras.get("param");
                String[] split = param.split("&");
                String newParam = "";
                for (String data : split) {
                    if (data.matches("p=\\d+")) {
                        String[] split1 = data.split("=");
                        p = Integer.parseInt(split1[1]);
                        data = split1[0] + "=" + (p + 1);
                    }
                    newParam = newParam + "&" + data;
                }

                extras.put("param", newParam.substring(1));
                params.put("param", newParam.substring(1));
                params.put("path", extras.get("path"));
                params.put("env", extras.get("env"));

                try {
                    Long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime();
                    CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(requestUrl + extras.get("postid") + p)
                            .httpUrl(requestUrl)
                            .resultLabelTag(comment)
                            .releaseTime(releaseTimeToLong)
                            .build();

                    HttpRequest httpRequest = commentRecord.getHttpRequest();
                    httpRequest.setMethod("POST");
                    httpRequest.setRequestBody(HttpRequestBody.form(params, "UTF-8"));
                    httpRequest.setHeaders(crawlerRequestRecord.getHttpRequest().getHeaders());
                    httpRequest.setExtras(extras);
                    parsedLinks.add(commentRecord);
                } catch (ParseException e) {
                    // Log with cause instead of printStackTrace().
                    log.error("Failed to parse comment time: {}", releaseTime, e);
                }
            }else{
                log.info("评论没有下一页！");
            }
        }
        // Blog article-list parsing
        else if(requestUrl.matches(blogEntranceUrl)){
            String nextPageUrl = httpPage.getHtml().xpath("//a[@tracker-eventcode=\"blog_pageNext\"]/@href").get();
            if(StringUtils.isNotEmpty(nextPageUrl)){
                nextPageUrl = getString(prefixUrl,requestUrl) + nextPageUrl.substring(1);

                CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .recordKey(nextPageUrl)
                        .httpUrl(nextPageUrl)
                        .releaseTime(System.currentTimeMillis())
                        .build();
                parsedLinks.add(turnPageRequest);
            }

            List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"htBox dianji\"]/ul").nodes();
            for (Selectable node : nodes) {
                String itemUrl = node.xpath(".//a[@tracker-eventcode=\"blog_link\"]/@href").get();
                if(StringUtils.isEmpty(itemUrl)){
                    continue;
                }else{
                    itemUrl = getString(prefixUrl,requestUrl) + itemUrl.substring(1);
                }

                // "gengxin" (update) lists carry the time in a different column class.
                List<String> all = requestUrl.contains("/gengxin_") ? node.xpath(".//li[@class=\"w10\"]/text()").all() : node.xpath(".//li[@class=\"w15\"]/text()").all();
                String releaseTime = "";
                for (String data : all) {
                    releaseTime = data.matches("\\d{2}-\\d{2} \\d{2}:\\d{2}") ? data : "";
                }
                if(StringUtils.isEmpty(releaseTime)){
                    continue;
                }else{
                    // Blog lists omit the year; assume the current year.
                    releaseTime = LocalDate.now().getYear() + "-" + releaseTime;
                }

                try {
                    Long releaseTimeToLong = DateUtils.parseDate(releaseTime,"yyyy-MM-dd HH:mm").getTime();

                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(itemUrl)
                            .httpUrl(itemUrl)
                            .releaseTime(releaseTimeToLong)
                            .resultLabelTag(article)
                            .resultLabelTag(interaction)
                            .build();
                    parsedLinks.add(itemRecord);

                } catch (ParseException e) {
                    // Log with cause instead of printStackTrace().
                    log.error("Failed to parse blog release time: {}", releaseTime, e);
                }
            }
        }
        return parsedLinks;
    }

    /**
     * Washes a downloaded page into structured crawler data according to the
     * result tags attached to the record (article / interaction / comment).
     * Blog pages use dedicated wash routines because their DOM differs.
     *
     * @param crawlerRecord record that fetched the page
     * @param page          downloaded page
     * @return washed data items (empty when the download failed)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        String requestUrl = page.getRequest().getUrl();
        // Sub-domain between "//" and the first "." names the section.
        String webSite = requestUrl.substring(requestUrl.indexOf("//") + 2,requestUrl.indexOf("."));
        int statusCode = page.getStatusCode();
        if(statusCode == 200){
            // NOTE(review): the wash helpers may return null on a parse failure,
            // so this list can contain null entries — confirm the pipeline
            // tolerates them before filtering here.
            if(webSite.matches("blog")){
                if (crawlerResultTags.hasDataType(article)) {
                    crawlerDataList.add(washBlogArticle(crawlerRecord, page));
                }

                if (crawlerResultTags.hasDataType(interaction)) {
                    crawlerDataList.add(washBlogInteraction(crawlerRecord, page));
                }
            }else{
                if (crawlerResultTags.hasDataType(article)) {
                    crawlerDataList.add(washArticle(crawlerRecord, page));
                }

                if (crawlerResultTags.hasDataType(interaction)) {
                    crawlerDataList.add(washInteraction(crawlerRecord, page));
                }
            }

            if(crawlerResultTags.hasDataType(comment)){
                // Comment responses are JSON; each element of ".re" is one comment.
                List<String> reList = page.getJson().jsonPath($_type + ".re").all();
                for (String data : reList) {
                    crawlerDataList.add(washComment(crawlerRecord,page,data));
                }
            }
        }else{
            // Parameterized logging instead of eager string concatenation.
            log.warn("页面下载错误！状态码：{}", statusCode);
        }
        return crawlerDataList;
    }

    /**
     * Washes a regular (non-blog) article page into a CrawlerData record.
     *
     * @param crawlerRequestRecord record that fetched the page
     * @param httpPage             downloaded article page
     * @return the washed article, or null when the release time cannot be parsed
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {

        String title = httpPage.getHtml().xpath("//div[@class=\"newsContent\"]/h1/text()").get();

        String author = httpPage.getHtml().xpath("//div[@class=\"author\"]/text()").get();
        if (StringUtils.isNotEmpty(author)) {
            // Keep only the name after the full-width colon.
            author = author.substring(author.lastIndexOf("：") + 1);
        } else {
            author = "";
        }
        // defaultString() guards the trim() below — the original threw an NPE
        // when the source node was missing from the page.
        String source = StringUtils.defaultString(httpPage.getHtml().xpath("//div[@class=\"source data-source\"]/@data-source").get());

        // Abstract and review blocks are optional; evaluate each xpath once
        // instead of twice as before.
        String tract = StringUtils.defaultString(httpPage.getHtml().xpath("//div[@class=\"abstract\"]/text()").get());
        String review = StringUtils.defaultString(httpPage.getHtml().xpath("//div[@class=\"b-review\"]/text()").get());
        List<String> articleTextList = httpPage.getHtml().xpath("//div[@id=\"ContentBody\"]//p//text()").all();

        // StringBuilder: no synchronization needed for a local accumulator.
        StringBuilder conents = new StringBuilder();
        conents.append(tract).append(review);
        for (String articleText : articleTextList) {
            conents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
        }

        String releaseTime = httpPage.getHtml().xpath("//div[@class=\"time\"]/text()").get();

        String itemUrl = httpPage.getRequest().getUrl();
        // Article key is the file name without extension.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        CrawlerData crawlerData = null;
        try {
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(article) // statically-imported constant instead of valueOf("article")
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, "yyyy年MM月dd日 HH:mm").getTime())
                    .addContentKV(Field_Content, conents.toString().trim())
                    .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(title))
                    .addContentKV(Field_Source, source.trim())
                    .addContentKV(Field_Author, author.trim())
                    .build();
        } catch (ParseException e) {
            // Include the cause so the bad timestamp is diagnosable from the log.
            log.error("页面(“{}”)读取错误，错误码：{}", itemUrl, httpPage.getStatusCode(), e);
        }

        return crawlerData;

    }

    /**
     * Washes one interaction-API JSON response (like / comment / view /
     * forward counters) into a CrawlerData record linked to its article.
     *
     * @param crawlerRequestRecord record that fetched the API response
     * @param httpPage             downloaded JSON page
     * @return the washed interaction record
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String re = httpPage.getJson().jsonPath($_type + ".re").get();
        JSONObject jsonObject = JSONObject.parseObject(re);
        // getString() converts numeric JSON values too; the original raw
        // (String) casts threw ClassCastException when the API returned numbers.
        String likes = jsonObject.getString("post_like_count");
        String comments = jsonObject.getString("post_comment_count");
        String views = jsonObject.getString("post_click_count");
        String forwards = jsonObject.getString("post_forward_count");

        // The article key travels in the "postid" query parameter of the URL.
        String interactionUrl = httpPage.getRequest().getUrl();
        String[] split = interactionUrl.split("&");
        String articleKey = "";
        for (String data : split) {
            if (StringUtils.isNotEmpty(data) && data.contains("postid")) {
                articleKey = data.substring(data.indexOf("=") + 1);
                break; // postid appears once; stop scanning.
            }
        }

        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                .resultLabelTag(interaction) // statically-imported constant instead of valueOf("interaction")
                .url(interactionUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_I_Likes, likes)
                .addContentKV(Field_I_Comments, comments)
                .addContentKV(Field_I_Views, views)
                .addContentKV(Field_I_Forwards, forwards)
                .build();

        return crawlerData;
    }

    /**
     * Washes one comment JSON element (from the comment API's ".re" array)
     * into a CrawlerData record linked to its parent article.
     *
     * @param crawlerRequestRecord record that fetched the comment page
     * @param httpPage             downloaded JSON page
     * @param data                 one element of the ".re" comment array
     * @return the washed comment, or null when its timestamp cannot be parsed
     */
    public CrawlerData washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,String data) {
        // "articleKey" was stashed in the request extras by parseLinks.
        Map<String, Object> extras = httpPage.getRequest().getExtras();

        JSONObject jsonObject = JSONObject.parseObject(data);
        String content = jsonObject.getString("reply_text");
        String releaseTime = jsonObject.getString("reply_publish_time");
        String author = jsonObject.getJSONObject("reply_user").getString("user_nickname");
        String commentId = jsonObject.getString("reply_id");

        CrawlerData crawlerData = null;
        try {
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(),extras.get("articleKey") ))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(),commentId))
                    .resultLabelTag(comment) // statically-imported constant instead of valueOf("comment")
                    .url(httpPage.getRequest().getUrl())
                    .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                    .addContentKV("content", content)
                    .addContentKV("author", author)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .build();
        } catch (ParseException e) {
            // Log with cause instead of printStackTrace() so the failure is traceable.
            log.error("Failed to parse comment time: {}", releaseTime, e);
        }

        return crawlerData;
    }

    /**
     * Washes a blog article page (blog.eastmoney.com) into a CrawlerData record.
     *
     * @param crawlerRequestRecord record that fetched the page
     * @param httpPage             downloaded blog article page
     * @return the washed article, or null when the release time cannot be parsed
     */
    public CrawlerData washBlogArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {

        String title = httpPage.getHtml().xpath("//div[@class=\"articleTitle\"]/text()").get();

        String author = httpPage.getHtml().xpath("//a[@tracker-eventcode=\"blog_userArticleUserName\"]/text()").get();

        List<String> articleTextList = httpPage.getHtml().xpath("//div[@class=\"articleBody\"]//text()").all();

        // StringBuilder: no synchronization needed for a local accumulator.
        StringBuilder conents = new StringBuilder();
        for (String articleText : articleTextList) {
            conents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
        }

        String releaseTime = httpPage.getHtml().xpath("//span[@class=\"time\"]/text()").get();

        // Blog article keys follow ".../blog_<key>.html".
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));

        CrawlerData crawlerData = null;
        try {
            // NOTE(review): getString() returns null when the time node is
            // missing; parseDate would then throw an unchecked exception that
            // is not caught here — confirm the node is always present.
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(article) // statically-imported constant instead of valueOf("article")
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(getString("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}",releaseTime), "yyyy-MM-dd HH:mm:ss").getTime())
                    .addContentKV("content", conents.toString().trim())
                    .addContentKV("title", StringEscapeUtils.unescapeHtml4(title))
                    .addContentKV("author", author)
                    .build();
        } catch (ParseException e) {
            // Include the cause so the bad timestamp is diagnosable from the log.
            log.error("页面(“{}”)读取错误，错误码：{}", itemUrl, httpPage.getStatusCode(), e);
        }

        return crawlerData;

    }

    /**
     * Washes the view/comment counters embedded in a blog article page into
     * an interaction CrawlerData record linked to the article.
     *
     * @param crawlerRequestRecord record that fetched the page
     * @param httpPage             downloaded blog article page
     * @return the washed interaction, or null when the release time cannot be parsed
     */
    public CrawlerData washBlogInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {

        String views = httpPage.getHtml().xpath("//span[@id=\"blog_view\"]/text()").get();
        String comments = httpPage.getHtml().xpath("//span[@id=\"blog_review\"]/text()").get();

        String releaseTime = httpPage.getHtml().xpath("//span[@class=\"time\"]/text()").get();

        // Blog article keys follow ".../blog_<key>.html".
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));

        CrawlerData crawlerData = null;
        try {
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                    .resultLabelTag(interaction) // statically-imported constant instead of valueOf("interaction")
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(getString("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}",releaseTime), "yyyy-MM-dd HH:mm:ss").getTime())
                    .addContentKV("views", views)
                    .addContentKV("comments", comments)
                    .build();
        } catch (ParseException e) {
            // Include the cause so the bad timestamp is diagnosable from the log.
            log.error("页面(“{}”)读取错误，错误码：{}", itemUrl, httpPage.getStatusCode(), e);
        }
        return crawlerData;
    }

    /**
     * Post-execution hook inherited from CrawlerCommonScript; this script
     * needs no per-record cleanup.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // Intentionally left empty.
    }
    /**
     * Returns the first substring of {@code input} that matches {@code regx},
     * or {@code null} when there is no match.
     *
     * @param regx  regular expression to search for
     * @param input text to search in
     * @return first matching substring, or null if none
     */
    private static String getString(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        // An if/ternary replaces the original while-loop that could only ever
        // run once; the unused urlList accumulator has been removed.
        return matcher.find() ? matcher.group(0) : null;
    }

}
