package com.chance.cc.crawler.development.scripts.sina;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.pipeline.result.CrawlerDataLocation;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.LocalDate;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Topic_Type;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/2/4 11:47
 * @Description
 **/
public class SinaCrawlerScript extends CrawlerCommonScript {

    private static Logger log = LoggerFactory.getLogger(SinaCrawlerScript.class);
    // Tag placed on a record that should be re-queued for another download attempt.
    private static final String REQUEST_AGAIN_TAG = "sina_request_retry";
    // KV-tag key listing the car-page modules (news/video/...) selected for crawling.
    private static final String CRAWLER_MODULES = "crawlerModule";
    // KV-tag key carrying the keyword-search source URL.
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";
    // Request-extras key pointing at a local file of article URLs to enqueue.
    private static final String FILE_NAME = "filePath";
    private static final String DOMAIN_RESULT_JSON_RECORD_TAG = "domain_result_json";// result-JSON field on the initial (seed) record

    // --- Self-media (MP) channel ---
    private static final String MP_URL = "http[s]*://auto.sina.com.cn/mp/";// self-media entrance
    private static final String MP_MODULE_URL = "http[s]*://auto.sina.com.cn/mp/list/\\?tagid=\\d+";
    private static final String MP_JSON_SOURCE_URL = "https://interface.sina.cn/auto/news/getWapNewsNewBycID.d.json?cid=%s&page=1&limit=20&tagid=%s&exceptIDs=";

    // Keyword search result pages.
    private static final String SEARCH_KW_URL = "https://search.sina.com.cn/\\S*";

    private static final String SINA_ENTRANCE_URL = "https://www.sina.com.cn/";
    private static final String CITY_ENTRANCE_URL = "http://city.sina.com.cn/";// city channel
    private static final String CITY_MODULE_ENTRANCE_URL = "http://\\S*.sina.com.cn/news/[a-z]*/list.shtml#entrance";
    private static final String CITY_MODULE_URL = "http://[a-z]*.sina.com.cn/(news|finance|brand|life|cqlife|sports)\\S*/list.shtml";
    private static final String CITY_MODULE_JSON_URL = "http://interface.sina.cn/dfz/jx/news/index.d.html\\?cid=\\d+&ch=\\S*&page=\\d+";
    private static final String BLOG_ENTRANCE_URL = "http://blog.sina.com.cn/";// blog channel
    private static final String BLOG_MODULE_URL = "http://blog.sina.com.cn/lm/[a-z\\.\\/]*";
    private static final String BLOG_MODULE_JSON_SOURCE_URL = "http://feed.mix.sina.com.cn/api/roll/get?pageid=%s&num=%s&lid=%s";
    private static final String BLOG_MODULE_JSON_URL = "http://feed.mix.sina.com.cn/api/roll/get\\S*";
    private static final String BlOG_ITEM_URL = "http[s]*://blog.sina.com.cn/\\S*/blog_\\S*.html";
    private static final String BLOG_INTERACTION_URL = "http://comet.blog.sina.com.cn/api\\S*";
    private static final String BLOG_COMMENT_URL = "http://comment5.news.sina.com.cn/page/info\\S*channel=blog\\S*";
    private static final String NEWS_ENTRANCE_URL = "https://news.sina.com.cn/";// news channel
    private static final String NEWS_MODULE_URL = "http://news.sina.com.cn/[a-z]*/";
    private static final String NEWS_MIL_ENTRANCE_URL = "http://mil.news.sina.com.cn/";// military news
    private static final String NEWS_MIL_HTML_URL = "http://mil.news.sina.com.cn/roll/index.d.html\\S*";
    private static final String NEWS_MIL_MODULE_URL = "http://mil.news.sina.com.cn/[a-z]*/";
    private static final String NEWS_CUL_ENTRANCE_URL = "http://cul.news.sina.com.cn/";// culture
    private static final String NEWS_SIFA_ENTRANCE_URL_ = "http://sifa.sina.com.cn/";// judicial
    private static final String NEWS_SIFA_MODULE_URL_ = "http://sifa.sina.com.cn/[a-z]*/";
    private static final String NEWS_SPORT_ENTRANCE_URL = "http://sports.sina.com.cn/";// sports
    private static final String NEWS_SPORT_MODULE_URL = "http://sports.sina.com.cn/[a-z]*/";
    private static final String NEWS_ENT_ENTRANCE_URL = "http://ent.sina.com.cn/";// entertainment
    private static final String NEWS_ENT_ROLL_URL = "http://ent.sina.com.cn/rollnews.shtml";
    private static final String NEWS_TECH_ENTRANCE_URL = "http://tech.sina.com.cn/";
    private static final String NEWS_TECH_ROLL_URL = "http://tech.sina.com.cn/roll/rollnews.shtml\\S*";

    // --- Medical channel (med.sina.com) ---
    private static final String MED_ENTRANCE_URL = "http[s]*://med.sina.com/";
    private static final String MED_TURN_URL = "http[s]*://med.sina.com/article_list_-1_1_\\d+_\\d+.html";

    private static final String MODULE_JSON_SOURCE_URL_ONE = "https://feed.sina.com.cn/api/roll/get?pageid=%s&lid=%s&num=30&page=1";// template for building a module's JSON feed URL
    private static final String MODULE_JSON_URL_ONE = "https://feed.sina.com.cn/api/roll/get\\S*";
    private static final String MODULE_JSON_SOURCE_URL_TWO = "http://interface.sina.cn/news/get_news_by_channel_new_v2018.d.json?cat_1=%s&show_num=20&page=1";
    private static final String MODULE_JSON_URL_TWO = "http://interface.sina.cn/news/get_news_by_channel_new_v2018.d.json\\S*";
    private static final String MODULE_JSON_URL_THREE = "http://interface.sina.cn/pc_api/public_news_data.d.json\\S*";

    // --- Article (item) detail pages and comment feeds ---
    private static final String ITEM_URL = "http[s]*://\\S*.sina.com.cn/\\S*\\d{4}-\\d{2}-\\d{2}[0-9\\/]*/[a-z\\_]*-[a-zA-Z0-9]*.shtml\\S*";
    private static final String ITEM_URL_TWO = "http://\\S*.sina.com.cn/articles/view/\\d+/[a-z0-9A-Z]*";
    private static final String ITEM_K_URL = "http[s]*://k.sina.cn/article_\\d+_[0-9A-Za-z]+.html\\S*";
    private static final String ITEM_K_URL_TWO = "http://zx.sina.cn/[a-z]*/\\d{4}-\\d{2}-\\d{2}/[a-z]*-[a-z0-9]*.d.html";
    private static final String ITEM_STOCK_URL = "http://stock.finance.sina.com.cn/stock/go.php/vReport_Show/kind/lastest/rptid/\\d+/index.phtml";
    private static final String CAR_ITEM_URL = "http[s]*://k.sina.com.cn/article_\\d+_[0-9A-Za-z]+.html\\S*";
    private static final String ITEM_MED_URL = "http[s]*://med.sina.com/article_detail_\\d+_\\d+_\\d+.html";
    private static final String ITEM_VIDEO_URL = "http://video.sina.com.cn/p/[a-zA-Z]*/\\d{4}-\\d{2}-\\d{2}/detail-[a-zA-Z0-9]*.d.html";
    private static final String COMMENT_SOURCE_URL = "https://comment.sina.com.cn/page/info?format=json&channel=%s&newsid=%s&page_size=10&page=1";
    private static final String COMMENT_URL = "https://comment.sina.com.cn/page/info\\S*";
    private static final String COMMENT_K_SOURCE_URL = "https://cmnt.sina.cn/aj/v2/list?channel=%s&newsid=%s&group=0&thread=1&page=1";
    private static final String COMMENT_K_URL = "https://cmnt.sina.cn/aj/v2/list\\S*";
    private static final String COMMENT_VIDEO_SOURCE_URL = "http://comment5.news.sina.com.cn/page/info?format=json&channel=%s&newsid=%s&page=1&page_size=20";
    private static final String COMMENT_VIDEO_URL = "http://comment5.news.sina.com.cn/page/info\\S*page_size=\\S*";


    /**
     * 脚本domain定义
     *
     * @return
     */
    /**
     * Identifies the crawler domain this script handles.
     *
     * @return the fixed domain key {@code "sina"}
     */
    @Override
    public String domain() {
        final String domainKey = "sina";
        return domainKey;
    }

    /**
     * 进入脚本的正则列表
     */
    /**
     * Registers every URL pattern this script can handle. The registration
     * order is preserved exactly from the original hand-written call list.
     */
    @Override
    public void initUrlRegulars() {
        String[] urlPatterns = {
                CAR_ITEM_URL, ITEM_URL, ITEM_K_URL, COMMENT_URL, COMMENT_K_URL,
                SEARCH_KW_URL, SINA_ENTRANCE_URL, CITY_ENTRANCE_URL,
                CITY_MODULE_ENTRANCE_URL, CITY_MODULE_URL, CITY_MODULE_JSON_URL,
                BLOG_ENTRANCE_URL, BLOG_MODULE_URL, BLOG_MODULE_JSON_URL,
                BlOG_ITEM_URL, BLOG_INTERACTION_URL, BLOG_COMMENT_URL,
                ITEM_URL_TWO, ITEM_K_URL_TWO, NEWS_ENTRANCE_URL, NEWS_MODULE_URL,
                MODULE_JSON_URL_ONE, NEWS_MIL_ENTRANCE_URL, NEWS_MIL_HTML_URL,
                NEWS_MIL_MODULE_URL, MODULE_JSON_URL_TWO, NEWS_CUL_ENTRANCE_URL,
                MODULE_JSON_URL_THREE, NEWS_SIFA_ENTRANCE_URL_, NEWS_SIFA_MODULE_URL_,
                NEWS_SPORT_ENTRANCE_URL, NEWS_SPORT_MODULE_URL, NEWS_ENT_ENTRANCE_URL,
                NEWS_ENT_ROLL_URL, NEWS_TECH_ENTRANCE_URL, NEWS_TECH_ROLL_URL,
                MED_ENTRANCE_URL, MED_TURN_URL, ITEM_MED_URL, ITEM_STOCK_URL,
                ITEM_VIDEO_URL, COMMENT_VIDEO_URL, MP_URL, MP_MODULE_URL
        };
        for (String urlPattern : urlPatterns) {
            addUrlRegular(urlPattern);
        }
    }

    /**
     * 是否执行脚本 ： 输入数据检查，合格的才进入脚本
     *
     * @param crawlerRequestRecord
     * @return
     */
    /**
     * Input gate: decides whether a request record is allowed to enter this script.
     *
     * NOTE(review): this always returns {@code false}, i.e. every record is
     * rejected at the gate. Confirm whether the script is intentionally disabled
     * this way or whether real validation logic is still missing here.
     *
     * @param crawlerRequestRecord the candidate record
     * @return {@code false} always
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return false;
    }

    /**
     * Expands the incoming seed request into concrete crawl records.
     *
     * Three sources are combined:
     * <ol>
     *   <li>support records whose URL contains "keys" feed the keyword setup
     *       via {@link #initKeyword};</li>
     *   <li>for MP (self-media) seeds, the item URL and release time stored in
     *       the {@code domain_result_json} KV tag become one item request,
     *       subject to the configured date range;</li>
     *   <li>if the request extras carry a {@code filePath}, every line of that
     *       UTF-8 file is enqueued as an item URL.</li>
     * </ol>
     *
     * @param requestRecord        the seed request
     * @param supportSourceRecords auxiliary records accompanying the seed
     * @return the item records to crawl (possibly empty)
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> crawlerRecords = new ArrayList<>();
        String requestUrl = requestRecord.getHttpRequest().getUrl();

        // Keyword-search support records are recognized by a "keys" marker in their URL.
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (url.contains("keys")) {
                initKeyword(requestRecord, supportSourceRecord, crawlerRecords);
            }
        }

        if (requestUrl.matches(MP_URL)) {
            if (requestRecord.tagsCreator().bizTags().hasKVTag(DOMAIN_RESULT_JSON_RECORD_TAG)) {
                KVTag domainResultJson = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(DOMAIN_RESULT_JSON_RECORD_TAG);
                CrawlerDomainUrls crawlerDomainUrls = JSON.parseObject(String.valueOf(domainResultJson.getVal()), CrawlerDomainUrls.class);
                Json urlJson = new Json(crawlerDomainUrls.getUrl());
                String itemUrl = urlJson.jsonPath($_type + ".itemUrl").get();
                long releaseTimeToLong = Long.parseLong(urlJson.jsonPath($_type + ".releaseTimeToLong").get());
                // Drop the whole seed if the item falls outside the configured date range.
                if (!isDateRange(requestRecord, releaseTimeToLong)) {
                    return crawlerRecords;
                }
                // The tag is consumed exactly once; remove it so it is not copied downstream.
                requestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(DOMAIN_RESULT_JSON_RECORD_TAG);

                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(requestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                crawlerRecords.add(itemRecord);
            }
        }

        // Optionally enqueue article URLs listed (one per line) in a local file.
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        String path = null;
        if (extras != null && (path = (String) extras.get(FILE_NAME)) != null) {
            // try-with-resources: the previous version leaked the reader on every call.
            try (BufferedReader bufferedReader = new BufferedReader(
                    new InputStreamReader(new FileInputStream(path), "UTF-8"))) {
                String line;// one article URL per line
                while ((line = bufferedReader.readLine()) != null) {
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(requestRecord)
                            .httpUrl(line)
                            .releaseTime(System.currentTimeMillis())
                            .copyBizTags()
                            .copyResultTags()
                            .build();
                    crawlerRecords.add(itemRecord);
                }
            } catch (Exception e) {
                // Log with the throwable so the stack trace is not lost.
                log.error("failed to read url file [" + path + "]", e);
            }
        }
        return crawlerRecords;
    }

    /**
     * 判断是否在时间范围内
     * @param crawlerRequestRecord
     * @return
     */
    /**
     * Checks whether the given release time falls inside the record's configured
     * date-range filter.
     *
     * If the record's filter is neither {@code dateRange} nor
     * {@code keyOrDateRange}, no date filtering applies and the method returns
     * {@code true}. Otherwise the range is taken from the last
     * {@code dateRange} filter info: an explicit {@code dateAllowRange}
     * [start, end] pair wins; otherwise {@code hourFromNow} defines a window
     * ending now.
     *
     * @param crawlerRequestRecord the record carrying the filter configuration
     * @param releaseTimeToLong    release time in epoch millis (may be null)
     * @return {@code true} if in range or no date filter applies
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord, Long releaseTimeToLong) {
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        if (filter != CrawlerEnum.CrawlerRecordFilter.keyOrDateRange && filter != CrawlerEnum.CrawlerRecordFilter.dateRange) {
            return true;// no date filtering requested
        }
        List<FilterInfo> filterInfos = crawlerRequestRecord.getFilterInfos();
        // Guard: the original unboxed releaseTimeToLong and iterated filterInfos
        // unconditionally, which NPEs when either is null.
        if (filterInfos == null || releaseTimeToLong == null) {
            return false;
        }
        Long startTime = null;
        Long endTime = null;
        for (FilterInfo filterInfo : filterInfos) {
            if (filterInfo.getFilter() != CrawlerEnum.CrawlerRecordFilter.dateRange) {
                continue;
            }
            long[] dateAllowRange = filterInfo.getDateAllowRange();
            int hourFromNow = filterInfo.getHourFromNow();
            if (dateAllowRange != null) {
                startTime = dateAllowRange[0];
                endTime = dateAllowRange[1];
            } else if (hourFromNow != 0) {
                endTime = System.currentTimeMillis();
                startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
            }
        }
        return startTime != null && releaseTimeToLong != 0
                && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime;
    }

    /**
     * 解析链接方法
     *
     * @param crawlerRequestRecord
     * @param httpPage
     * @return
     */
    /**
     * Routes a downloaded page to the parse handler matching its URL pattern and
     * collects the follow-up requests (entrance pages, turn pages, item pages,
     * comment feeds) that the handlers discover.
     *
     * Download policy: a failed download or a status code other than 200/404 is
     * re-queued for retry; a 404 is dropped without retrying. In both cases the
     * page is marked as not needing washing.
     *
     * @param crawlerRequestRecord the record whose request produced {@code httpPage}
     * @param httpPage             the downloaded page to parse
     * @return the follow-up requests found on this page (possibly empty)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<CrawlerRequestRecord>();

        if (!httpPage.isDownloadSuccess() || (httpPage.getStatusCode() != 200 && httpPage.getStatusCode() != 404)) {
            log.error("{} status code : {}", crawlerRequestRecord.getHttpRequest().getUrl(), httpPage.getStatusCode());
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        if (httpPage.getStatusCode() == 404) {
            log.error("{} status code : {}", crawlerRequestRecord.getHttpRequest().getUrl(), httpPage.getStatusCode());
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        String requestUrl = httpPage.getRequest().getUrl();

        // Self-media (MP) module list page.
        if (requestUrl.matches(MP_MODULE_URL)) {
            mpModuleUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        // Channel entrance pages (news / military / judicial / sports / entertainment / tech).
        if (requestUrl.matches(NEWS_ENTRANCE_URL) || requestUrl.matches(NEWS_MIL_ENTRANCE_URL) || requestUrl.matches(NEWS_SIFA_ENTRANCE_URL_)
                || requestUrl.matches(NEWS_SPORT_ENTRANCE_URL) || requestUrl.matches(NEWS_ENT_ENTRANCE_URL) || requestUrl.matches(NEWS_TECH_ENTRANCE_URL)) {
            newsEntranceUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        // Channel module / rolling-news pages.
        if (requestUrl.matches(NEWS_MODULE_URL) || requestUrl.matches(NEWS_MIL_MODULE_URL) || requestUrl.matches(NEWS_CUL_ENTRANCE_URL)
                || requestUrl.matches(NEWS_SIFA_MODULE_URL_) || requestUrl.matches(NEWS_SPORT_MODULE_URL) || requestUrl.matches(NEWS_ENT_ROLL_URL)
                || requestUrl.matches(NEWS_TECH_ROLL_URL)) {
            newsModuleEntranceUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        if (requestUrl.matches(NEWS_MIL_HTML_URL)) {
            newsMilHtmlUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        // City channel pages.
        if (requestUrl.matches(CITY_ENTRANCE_URL)) {
            cityEntranUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(CITY_MODULE_ENTRANCE_URL)) {
            cityModuleEntranceUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(CITY_MODULE_URL)) {
            cityModuleUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(CITY_MODULE_JSON_URL)) {
            cityModuleJsonUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        // Blog channel pages.
        if (requestUrl.matches(BLOG_ENTRANCE_URL)) {
            blogEntranceUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(BLOG_MODULE_URL)) {
            blogModuleUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(BLOG_MODULE_JSON_URL)) {
            blogModuleJsonUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        // Keyword search result page.
        if (requestUrl.matches(SEARCH_KW_URL)) {
            searchKwUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        // Module JSON feed responses.
        if (requestUrl.matches(MODULE_JSON_URL_ONE) || requestUrl.matches(MODULE_JSON_URL_TWO) || requestUrl.matches(MODULE_JSON_URL_THREE)) {
            carNewsJsonUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        // Article (item) detail pages.
        if (requestUrl.matches(CAR_ITEM_URL) || requestUrl.matches(ITEM_URL) || requestUrl.matches(ITEM_K_URL) || requestUrl.matches(BlOG_ITEM_URL)
                || requestUrl.matches(ITEM_URL_TWO) || requestUrl.matches(ITEM_K_URL_TWO) || requestUrl.matches(ITEM_VIDEO_URL)) {
            itemUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        // Comment feed responses.
        if (requestUrl.matches(COMMENT_URL) || requestUrl.matches(COMMENT_K_URL) || requestUrl.matches(BLOG_COMMENT_URL) || requestUrl.matches(COMMENT_VIDEO_URL)) {
            commentUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        // Medical channel (med.sina.com) pages.
        if (requestUrl.matches(MED_ENTRANCE_URL)) {
            medEntranceUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(MED_TURN_URL)) {
            medTurnUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(ITEM_STOCK_URL) || requestUrl.matches(ITEM_MED_URL)) {
            itemArticleRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        return parsedLinks;
    }

    //自媒体板块获取对应的json地址解析
    private void mpModuleUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        String tagId = requestUrl.substring(requestUrl.lastIndexOf("=") + 1);
        String cid = washContent("pageType = \"+\\d+\"", httpPage.getRawText());
        if (StringUtils.isBlank(cid)) {
            log.error("cid is null!requestUrl [{}]", requestUrl);
            return;
        }
        cid = cid.substring(cid.indexOf("\"") + 1, cid.lastIndexOf("\""));
        String url = String.format(MP_JSON_SOURCE_URL, cid, tagId);

        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .build();
        parsedLinks.add(record);

    }

    //获取医药的异步加载页面
    private void medEntranceUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String sign = httpPage.getHtml().xpath("//div[@class=\"list show\"]/a[@class=\"clickmore\"]/@sign").get();
        if (StringUtils.isBlank(sign)) {
            log.error("sign is null!");
            return;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        String turnUrl = requestUrl + "article_list_" + sign + ".html";
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(turnUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .build();
        parsedLinks.add(turnRecord);
    }

    /**
     * Handles a med.sina.com article-list page: schedules the next list page
     * (by incrementing the page number embedded in
     * {@code .../article_list_-1_1_<page>_<sign>.html}) and one item request per
     * article entry on the current page.
     *
     * @param crawlerRequestRecord record that produced this list page
     * @param httpPage             the downloaded list page
     * @param parsedLinks          output collection of follow-up requests
     */
    private void medTurnUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Turn page.
        String requestUrl = httpPage.getRequest().getUrl();
        String[] split = requestUrl.split("-1_1_");
        String[] split1 = split[1].split("_");
        String nextUrl = split[0] + "-1_1_" + (Integer.parseInt(split1[0]) + 1) + "_" + split1[1];
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parsedLinks.add(turnRecord);

        // Item detail pages.
        List<Selectable> nodes = httpPage.getHtml().xpath("//li/div[@class=\"indextext-right\"]").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath(".//a[@class=\"indextext-title\"]/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                // BUG FIX: was "return", which silently dropped all remaining entries.
                continue;
            }

            String releaseTime = node.xpath(".//span[@class=\"indextext-time\"]/text()").get();
            if (StringUtils.isBlank(releaseTime)) {
                continue;// was "return" — skip only this entry
            }
            // Timestamps look like "yyyy-MM-dd HH:mm:ss.SSS"; drop the fractional part.
            releaseTime = releaseTime.trim().split("\\.")[0];

            try {
                long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                // Keep the throwable so the failing value and stack are visible.
                log.error("unparseable release time [" + releaseTime + "]", e);
            }
        }
    }


    /**
     * Dispatches a car page to per-module handlers based on the module names in
     * the {@code crawlerModule} KV tag. Only the "video" and "news" modules are
     * implemented; "peizhi", "photo", "price" and "shanghai" used to evaluate an
     * xpath into an unused local variable — that dead code has been removed.
     *
     * NOTE(review): the only call site of this method is commented out in
     * parseLinks — confirm whether it is still needed.
     *
     * @param crawlerRequestRecord record carrying the module KV tag
     * @param httpPage             the downloaded car page
     * @param parsedLinks          output collection of follow-up requests
     */
    private void carUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        List<String> kvTagListVal = (List<String>) categoryTag.getKVTag(CRAWLER_MODULES).getVal();
        // The module list is consumed once; remove it so it is not copied downstream.
        categoryTag.getKvTags().remove(CRAWLER_MODULES);
        for (String data : kvTagListVal) {
            if ("video".equals(data)) {
                carModuleRecord(crawlerRequestRecord, httpPage, parsedLinks, "//div[@id=\"nav\"]//a[text() = \"视频\"]/@href", "video");
            } else if ("news".equals(data)) {
                carModuleRecord(crawlerRequestRecord, httpPage, parsedLinks, "//div[@id=\"nav\"]//a[text() = \"资讯\"]/@href", "news");
            }
            // Other module names ("peizhi", "photo", "price", "shanghai") are
            // recognized but not yet implemented.
        }
    }

    /**
     * Parses the city links on the city.sina.com.cn entrance page, derives each
     * city's sub-site code from its href (the host label between "//" and the
     * first "."), and schedules that city's entrance URL tagged with a
     * province location.
     *
     * @param crawlerRecord the record that produced this entrance page
     * @param httpPage      the downloaded entrance page
     * @param parsedLinks   output collection of follow-up requests
     */
    private void cityEntranUrlRecord(CrawlerRequestRecord crawlerRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        List<String> all = httpPage.getHtml().xpath("//div[@class=\"subnav clear\"]/div/a/@href").all();
        for (String url : all) {
            // Guard: scraped hrefs may be malformed (no scheme, no dot, or a dot
            // before the scheme) — the original substring call threw on those.
            int schemeEnd = url.indexOf("//");
            int firstDot = url.indexOf(".");
            if (schemeEnd < 0 || firstDot < schemeEnd + 2) {
                continue;
            }
            String citySite = url.substring(schemeEnd + 2, firstDot);
            String entranceUrl = getEntranceUrl(citySite);
            if (StringUtils.isEmpty(entranceUrl)) {
                continue;
            }
            CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(entranceUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .notFilterRecord()
                    .build();

            CrawlerDataLocation location = new CrawlerDataLocation();
            location.setProvince(citySite);
            crawlerRequestRecord.tagsCreator().bizTags().addLocation(location);
            parsedLinks.add(crawlerRequestRecord);
        }
    }

    /**
     * Parses the left-hand navigation of a city entrance page and schedules
     * every sub-module list page whose href matches {@code CITY_MODULE_URL}.
     */
    private void cityModuleEntranceUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        List<String> moduleUrls = httpPage.getHtml().xpath("//ul[@class=\"navlist-sub\"]/li/a/@href").all();
        for (String moduleUrl : moduleUrls) {
            boolean usable = StringUtils.isNotBlank(moduleUrl) && moduleUrl.matches(CITY_MODULE_URL);
            if (usable) {
                CrawlerRequestRecord moduleRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(moduleUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(moduleRecord);
            }
        }
    }

    /**
     * Extracts the async-load CONFIG parameters embedded in an inline script of
     * a city list page and schedules the first page of the corresponding JSON
     * list URL ({@code ?cid=...&ch=...&page=1}).
     *
     * @param crawlerRequestRecord record that produced this list page
     * @param httpPage             the downloaded list page
     * @param parsedLinks          output collection of follow-up requests
     */
    private void cityModuleUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // The page embeds its list-API parameters in a script inside #listArticle.
        String config = httpPage.getHtml().xpath("//div[@id=\"listArticle\"]/script").get();
        if (StringUtils.isEmpty(config)) {
            return;
        }
        // Take the body of the first {...} literal, stripped of newlines/tabs.
        int open = config.indexOf("{");
        int close = config.indexOf("}");
        if (close <= open) {
            return;// malformed config block
        }
        config = config.substring(open + 1, close).replaceAll("\n", "").replaceAll("\t", "");
        Map<String, String> paramsMap = new HashMap<>();
        for (String param : config.split(",")) {
            String[] params = param.split(":");
            String key = params[0].trim();
            if (key.matches("url|channel|ch")) {
                // String values are single-quoted and may themselves contain ':'
                // (e.g. http://...), so take the text between the outer quotes.
                int firstQuote = param.indexOf("'");
                int lastQuote = param.lastIndexOf("'");
                if (firstQuote >= 0 && lastQuote > firstQuote) {
                    paramsMap.put(key, param.substring(firstQuote + 1, lastQuote).trim());
                }
            } else if (params.length > 1) {
                paramsMap.put(key, params[1].trim());
            }
        }

        // Assemble the first-page JSON list URL.
        String url = paramsMap.get("url") + "?cid=" + paramsMap.get("cid") + "&ch=" + paramsMap.get("ch") + "&page=1";
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .build();
        parsedLinks.add(turnRecord);
    }

    /**
     * Builds the city-channel entrance URL for a Sina city sub-site. The news
     * path segment depends on the city: "sh" for gx/hb, "s" for hunan, "m" for
     * everything else.
     *
     * The original ended with a redundant {@code if (!citySite.matches("gx|hb|hunan"))}
     * that re-assigned exactly the value the else-branch had already produced —
     * that dead duplication is removed; behavior is unchanged.
     *
     * @param citySite the city sub-site code (e.g. "gx", "hunan", "sh")
     * @return the entrance URL for that city
     */
    private String getEntranceUrl(String citySite) {
        final String channel;
        if (citySite.matches("gx|hb")) {
            channel = "sh";
        } else if (citySite.matches("hunan")) {
            channel = "s";
        } else {
            channel = "m";
        }
        return "http://" + citySite + ".sina.com.cn/news/" + channel + "/list.shtml#entrance";
    }

    /**
     * Handles a city-channel JSON list response: schedules the next page (by
     * incrementing the value after "page=" in the request URL) and one item
     * request per entry in {@code result.data.list}.
     *
     * @param crawlerRequestRecord record that produced this JSON page
     * @param httpPage             the downloaded JSON response
     * @param parsedLinks          output collection of follow-up requests
     */
    private void cityModuleJsonUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Turn page: everything before "page=" plus the incremented page number.
        String requestUrl = httpPage.getRequest().getUrl();
        String[] split = requestUrl.split("page=");
        String nextPageUrl = split[0] + "page=" + (Integer.parseInt(split[1]) + 1);
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parsedLinks.add(turnRecord);

        // One item request per JSON list entry.
        List<String> itemList = httpPage.getJson().jsonPath($_type + ".result.data.list").all();
        for (String item : itemList) {
            Json json = new Json(item);
            String itemUrl = json.jsonPath($_type + ".URL").get();
            if (StringUtils.isEmpty(itemUrl)) {
                continue;
            }

            String releaseTime = json.jsonPath($_type + ".fpTime").get();
            if (StringUtils.isEmpty(releaseTime)) {
                continue;
            }
            // The feed's fpTime apparently lacks a year; recover it from the
            // "yyyy-MM-dd" date embedded in the item URL by prefixing its year.
            // NOTE(review): assumes fpTime looks like "MM-dd HH:mm" so the
            // concatenation matches the "yyyyMM-dd HH:mm" pattern below — confirm
            // against a live feed response.
            Pattern compile = Pattern.compile("\\d{4}-\\d{2}-\\d{2}");
            Matcher matcher = compile.matcher(itemUrl);
            if (matcher.find()) {
                releaseTime = matcher.group(0).split("-")[0] + releaseTime;
            }


            try {
                // If no date was found in the URL, this parse fails and the entry
                // is skipped via the logged ParseException.
                long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyyMM-dd HH:mm").getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();

                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }
    }

    /**
     * Parses the navigation list on the blog entrance page and schedules every
     * link matching {@code BLOG_MODULE_URL} as a module page request.
     */
    private void blogEntranceUrlRecord(CrawlerRequestRecord crawlerRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        List<String> navLinks = httpPage.getHtml().xpath("//ul[@class=\"nav-list\"]/li/a/@href").all();
        for (String navLink : navLinks) {
            if (navLink.matches(BLOG_MODULE_URL)) {
                CrawlerRequestRecord moduleRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRecord)
                        .httpUrl(navLink)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .copyResultTags()
                        .notFilterRecord()
                        .build();
                parsedLinks.add(moduleRecord);
            }
        }
    }

    /**
     * Extracts the {@code pageId}/{@code num}/{@code lid} values embedded in a
     * blog module page's raw HTML, builds the backing JSON feed URL from them,
     * and queues it as a non-filtered turn-page request. Does nothing when any
     * of the three values is missing.
     */
    private void blogModuleUrlRecord(CrawlerRequestRecord crawlerRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String rawText = httpPage.getRawText();
        // Each washContent hit looks like "pageId: 123"; the value follows the colon.
        String pageIdPair = washContent("pageId: \\d+", rawText);
        String numPair = washContent("num: \\d+", rawText);
        String lidPair = washContent("lid: \\d+", rawText);
        if (StringUtils.isBlank(pageIdPair) || StringUtils.isBlank(numPair) || StringUtils.isBlank(lidPair)) {
            return;
        }

        String feedUrl = String.format(BLOG_MODULE_JSON_SOURCE_URL,
                pageIdPair.split(":")[1].trim(),
                numPair.split(":")[1].trim(),
                lidPair.split(":")[1].trim());
        parsedLinks.add(CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRecord)
                .httpUrl(feedUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .notFilterRecord()
                .build());
    }

    /**
     * Parses a blog-module JSON feed: queues the next page (addressed by the
     * last article's {@code ctime}) and one item request per article entry.
     * Schedules a retry when the JSON payload cannot be read.
     */
    private void blogModuleJsonUrlRecord(CrawlerRequestRecord crawlerRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        // Parse the next page marker out of the JSON payload.
        List<String> all = new ArrayList<>();
        try {
            all = httpPage.getJson().jsonPath($_type + ".result.data").all();
        } catch (Exception e) {
            log.error("sina blog page(“" + requestUrl + "”) download is fail !will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRecord);
            return;
        }

        // FIX: an empty result list previously threw IndexOutOfBoundsException
        // at all.get(all.size() - 1) below.
        if (all == null || all.isEmpty()) {
            return;
        }

        Json lastArticleJson = new Json(all.get(all.size() - 1));
        String ctime = lastArticleJson.jsonPath($_type + ".ctime").get();
        if (StringUtils.isNotEmpty(ctime)) {
            // The next page is keyed by the oldest article's ctime.
            String nextPageUrl;
            if (requestUrl.contains("ctime")) {
                String[] split = requestUrl.split("ctime=");
                nextPageUrl = split[0] + "ctime=" + ctime;
            } else {
                nextPageUrl = requestUrl + "&ctime=" + ctime;
            }

            CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            parsedLinks.add(turnRecord);
        }

        // One item request per article entry.
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String itemUrl = jsonObject.getString("url");
            String releaseTime = jsonObject.getString("ctime");
            if (StringUtils.isEmpty(itemUrl) || StringUtils.isEmpty(releaseTime)) {
                continue;
            }

            long releaseTimeToLong;
            try {
                // ctime looks like epoch seconds; appending "000" yields milliseconds.
                releaseTimeToLong = Long.parseLong(releaseTime + "000");
            } catch (NumberFormatException e) {
                // FIX: a non-numeric ctime previously aborted the whole page parse.
                log.error("invalid ctime [" + releaseTime + "] for url [" + itemUrl + "]");
                continue;
            }

            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            parsedLinks.add(itemRecord);
        }
    }

    /**
     * Collects channel navigation links from a news entrance page (falling back
     * to the "滚动" rolling-news anchor when none of the nav xpaths matched) and
     * queues each link as a non-filtered turn-page request.
     */
    private void newsEntranceUrlRecord(CrawlerRequestRecord crawlerRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        List<String> navUrls = httpPage.getHtml().xpath("//div[@data-sudaclick=\"newsnav\"]/a/@href|" +
                "//ul[@data-sudaclick=\"section-nav\"]/li[not(@class=\"active\")]/a/@href|" +
                "//ul[@class=\"header_menu page_radius\"]/li/a/@href|" +
                "//a[@suda-uatrack=\"key=ty0526&value=blk_index_sports_nav_basic_18\"]/@href").all();
        if (navUrls.isEmpty()) {
            navUrls = httpPage.getHtml().xpath("//a[text()='滚动']/@href").all();
        }
        for (String navUrl : navUrls) {
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(navUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build());
        }
    }

    /**
     * Maps a news module entrance page to its backing JSON feed URL (the mapping
     * depends on which channel pattern the request URL matches) and queues that
     * feed as a non-filtered turn-page request. Does nothing when the required
     * ids cannot be determined.
     */
    private void newsModuleEntranceUrlRecord(CrawlerRequestRecord crawlerRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        String feedUrl = "";
        if (requestUrl.matches(NEWS_MODULE_URL) || requestUrl.matches(NEWS_SPORT_MODULE_URL) || requestUrl.matches(NEWS_ENT_ROLL_URL)
                || requestUrl.matches(NEWS_TECH_ROLL_URL)) {
            // pageid/lid are carried on the "全部" (all) tab anchor.
            String pageId = httpPage.getHtml().xpath("//a[text()='全部']/@pageid").get();
            String lid = httpPage.getHtml().xpath("//a[text()='全部']/@s_id").get();
            if (StringUtils.isBlank(pageId) || StringUtils.isBlank(lid)) {
                log.error("pageId or lid can not null !");
                return;
            }
            feedUrl = String.format(MODULE_JSON_SOURCE_URL_ONE, pageId, lid);
        } else if (requestUrl.matches(NEWS_MIL_MODULE_URL)) {
            // Military channel: the category id is fixed per sub-path.
            String categoryId = "";
            if (requestUrl.contains("zhengming")) {
                categoryId = "263326";
            } else if (requestUrl.contains("dgby")) {
                categoryId = "70035";
            } else if (requestUrl.contains("jshm")) {
                categoryId = "57921";
            }
            if (StringUtils.isEmpty(categoryId)) {
                return;
            }
            feedUrl = String.format(MODULE_JSON_SOURCE_URL_TWO, categoryId);
        } else if (requestUrl.matches(NEWS_CUL_ENTRANCE_URL)) {
            // Culture channel uses a single fixed feed endpoint.
            feedUrl = "http://interface.sina.cn/pc_api/public_news_data.d.json?cids=209211&pdps=PDPS000000060130%2CPDPS000000066866&type=std_news%2Cstd_slide%2Cstd_video&pageSize=20&top_id=hencxtu1691422%2Chencxtu5974075%2Chencxtu5919005%2Chencxtu5908111&mod=nt_culture0&cTime=1483200000&up=0&action=0&tm=1613970870";
        } else if (requestUrl.matches(NEWS_SIFA_MODULE_URL_)) {
            // Judicial channel: pageid/lid fixed per section.
            String pageId = "";
            String lid = "";
            if (requestUrl.contains("news")) {
                pageId = "354";
                lid = "2120";
            } else if (requestUrl.contains("publicity")) {
                pageId = "391";
                lid = "2508";
            }
            if (StringUtils.isEmpty(lid) || StringUtils.isEmpty(pageId)) {
                return;
            }
            feedUrl = String.format(MODULE_JSON_SOURCE_URL_ONE, pageId, lid);
        }
        if (StringUtils.isEmpty(feedUrl)) {
            return;
        }
        parsedLinks.add(CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRecord)
                .httpUrl(feedUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .build());
    }

    /**
     * Parses a military-channel HTML listing: queues the "下一页" (next page)
     * link as a turn-page request, then one item request per entry of the
     * fixList block. The release timestamp is assembled from the date embedded
     * in the item URL plus the clock time shown in the list row.
     */
    private void newsMilHtmlUrlRecord(CrawlerRequestRecord crawlerRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Turn page.
        String nextPageHref = httpPage.getHtml().xpath("//a[text()='下一页']/@href").get();
        if (StringUtils.isNotBlank(nextPageHref)) {
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(StringEscapeUtils.unescapeHtml(nextPageHref))
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyResultTags()
                    .copyBizTags()
                    .build());
        }

        // Item pages.
        for (Selectable listItem : httpPage.getHtml().xpath("//div[@class=\"fixList\"]//li").nodes()) {
            String itemUrl = listItem.xpath("./a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }

            String timeText = listItem.xpath("./span/text()").get();
            if (StringUtils.isBlank(timeText)) {
                continue;
            }
            // Date portion comes from the URL, hour:minute from the span text.
            String releaseTime = washContent("\\d{4}-\\d{2}-\\d{2}", itemUrl) + washContent("\\d{2}:\\d{2}", timeText);

            try {
                long releaseMillis = DateUtils.parseDate(releaseTime, "yyyy-MM-ddHH:mm").getTime();
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseMillis)
                        .copyBizTags()
                        .copyResultTags()
                        .build());
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }

    }

    /**
     * Queues the car-module URL selected by {@code xpath}, forwarding the
     * Cookie/Host values stored on the category tag as HTTP headers and
     * re-tagging the new record with {@code site}. The Cookie/Host KV tags are
     * removed from the copied tags so they do not propagate further.
     */
    private void carModuleRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, String xpath, String site) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        // Same lookup order as before: Cookie first, then Host.
        String cookie = categoryTag.getKVTagStrVal("Cookie");
        if (cookie == null) {
            log.error("cookie or host can not null");
            return;
        }
        String host = categoryTag.getKVTagStrVal("Host");
        if (host == null) {
            log.error("cookie or host can not null");
            return;
        }

        String moduleUrl = httpPage.getHtml().xpath(xpath).get();
        CrawlerRequestRecord moduleRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(moduleUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .httpHead("Cookie", cookie)
                .httpHead("Host", host)
                .build();
        moduleRecord.tagsCreator().bizTags().addSite(site);
        moduleRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove("Cookie");
        moduleRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove("Host");
        parsedLinks.add(moduleRecord);
    }

    /**
     * Parses a keyword-search result page: bails out when the page reports zero
     * hits ("找到相关新闻0篇"), queues the next result page, then one item request
     * per result entry with its parsed release time.
     */
    private void searchKwUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String rawText = httpPage.getRawText();
        String counts = washContent("找到相关新闻\\d+篇", rawText);
        if ("找到相关新闻0篇".equals(counts)) {
            log.error(counts);
            return;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        // Turn page: the "下一页" anchor carries a relative query string.
        String aText = washContent("<a[\\S\\ ]*>下一页</a>", rawText);
        if (StringUtils.isNotBlank(aText)) {
            String nextUrl = new Html(aText).xpath("//a/@href").get();
            // FIX: guard against a missing href; unescapeHtml(null)+concat produced garbage.
            if (StringUtils.isNotBlank(nextUrl)) {
                nextUrl = requestUrl.split("\\?")[0] + StringEscapeUtils.unescapeHtml(nextUrl);
                CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(nextUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(record);
            }
        }

        // Item pages: the result list sits between two fixed HTML markers.
        int start = rawText.indexOf("<div id=\"nav_result_container\"></div>");
        int end = rawText.indexOf("<table cellspacing=\"0\" style=\"margin:0 auto;\">");
        if (start < 0 || end < 0 || start >= end) {
            // FIX: absent markers previously threw StringIndexOutOfBoundsException.
            return;
        }
        String allItemDatas = rawText.substring(start, end);
        if (StringUtils.isBlank(allItemDatas)) {
            return;
        }
        List<Selectable> nodes = new Html(allItemDatas).xpath("//h2").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }
            itemUrl = StringEscapeUtils.unescapeHtml(itemUrl);

            String releaseTime = node.xpath("./span/text()").get();
            releaseTime = StringUtils.isBlank(releaseTime) ? "" : washContent("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", releaseTime);
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }

            try {
                long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                // Keep the stack trace for diagnosis.
                log.error(e.getMessage(), e);
            }
        }
    }

    // JSON-format listing feed.
    /**
     * Parses a JSON car/news listing feed: queues the next page by rebuilding
     * the query string with the "page"/"up" parameter incremented, then one
     * item request per article entry. The release-time field name differs per
     * feed pattern (cTime / createtime / ctime).
     */
    private void carNewsJsonUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        // Turn page.
        String[] split = requestUrl.split("\\?");
        // FIX: a URL without a query string previously threw ArrayIndexOutOfBoundsException.
        if (split.length < 2) {
            log.error("carNewsJsonUrlRecord: url has no query string [{}]", requestUrl);
            return;
        }
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        int currentPage = 1;
        String nextUrl = split[0] + "?";
        for (NameValuePair nameValuePair : parse) {
            if ("page".equals(nameValuePair.getName()) || "up".equals(nameValuePair.getName())) {
                // Bump the page counter to address the next page.
                currentPage = Integer.parseInt(nameValuePair.getValue());
                nextUrl = nextUrl + nameValuePair.getName() + "=" + (currentPage + 1) + "&";
            } else if ("exceptIDs".equals(nameValuePair.getName())) {
                // This parameter must stay URL-encoded when re-emitted.
                try {
                    nextUrl = nextUrl + nameValuePair.getName() + "=" + URLEncoder.encode(nameValuePair.getValue(), "UTF-8") + "&";
                } catch (UnsupportedEncodingException e) {
                    log.error(e.getMessage(), e);
                }
            } else {
                nextUrl = nextUrl + nameValuePair.getName() + "=" + nameValuePair.getValue() + "&";
            }
        }
        nextUrl = nextUrl.substring(0, nextUrl.length() - 1); // drop trailing '&'
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parsedLinks.add(turnRecord);

        // Item pages: the JSON path of the data array depends on the feed pattern.
        List<String> all = new ArrayList<>();
        if (requestUrl.matches(MODULE_JSON_URL_THREE)) {
            all = httpPage.getJson().jsonPath($_type + ".data").all();
        } else if (requestUrl.matches(MODULE_JSON_URL_ONE) || requestUrl.matches(MODULE_JSON_URL_TWO)) {
            all = httpPage.getJson().jsonPath($_type + ".result.data").all();
        }
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String itemUrl = jsonObject.getString("pc_url");
            if (StringUtils.isEmpty(itemUrl)) {
                itemUrl = jsonObject.getString("url");
            }
            String releaseTime = jsonObject.getString("cTime");
            if (StringUtils.isEmpty(releaseTime)) {
                // Fallback field name differs per feed pattern.
                if (requestUrl.matches(MODULE_JSON_URL_TWO)) {
                    releaseTime = jsonObject.getString("createtime");
                } else if (requestUrl.matches(MODULE_JSON_URL_ONE) || requestUrl.matches(MODULE_JSON_URL_THREE)) {
                    releaseTime = jsonObject.getString("ctime");
                }
            }

            if (StringUtils.isBlank(itemUrl) || StringUtils.isBlank(releaseTime)) {
                continue;
            }

            try {
                // Pure digits = epoch seconds (append "000" for ms); otherwise a formatted date.
                long releaseTimeToLong = releaseTime.matches("\\d+") ? Long.parseLong(releaseTime + "000") : DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();

                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                // FIX: keep the stack trace instead of only the bare message.
                log.error(e.getMessage(), e);
            }
        }
    }

    /**
     * Strips the "interaction" and "comment" labels from the record's result
     * tags when those data types are present, so the article page itself is
     * not also washed for them.
     */
    private void itemArticleRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        CrawlerResultTags tags = crawlerRequestRecord.tagsCreator().resultTags();
        if (tags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
            tags.getCategoryTag().removeLabelTag("interaction");
        }
        if (tags.hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
            tags.getCategoryTag().removeLabelTag("comment");
        }
    }


    /**
     * Processes an article (item) page: detects deleted/missing pages, recovers
     * the release time when it was not already set, then derives the
     * interaction-count and comment feed URLs (from the page's embedded
     * channel/newsid values, or from the blog URL itself) and queues requests
     * for whichever of the "interaction"/"comment" data types the record is
     * tagged with. The corresponding label tags are removed so each is only
     * queued once.
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        String h1 = httpPage.getHtml().xpath("//h1/text()").get();
        String p = httpPage.getHtml().xpath("//p[@class=\"u-lost-des\"]/text()").get();
        // Dead-page detection: the h1/p texts say "does not exist" / "deleted",
        // or the raw page reports an abnormal account. Skip washing entirely.
        if ((requestUrl.matches(BlOG_ITEM_URL) && StringUtils.isNotEmpty(h1) && (h1.contains("不存在") || h1.contains("被删除") || httpPage.getRawText().contains("此账户出现异常")))
                || (StringUtils.isNotBlank(p) && p.contains("不存在"))) {
            crawlerRequestRecord.setNeedWashPage(false);
            log.error(h1 + p + "articleUrl:[{}]", requestUrl);
            return;
        }

        // When the record was queued without a parsed release time, recover it
        // from the page itself and re-enable washing only on success.
        boolean needWashPage = crawlerRequestRecord.isNeedWashPage();
        if(!needWashPage){
            String releaseTime = httpPage.getHtml().xpath("//span[contains(@class,'date')]/text()|//p[@class=\"source-time\"]/span[1]/text()|//span[text()='发布时间：']/em/text()").get();
            // Fallback 1: any "yyyy-MM-dd HH:mm:ss" found in the raw HTML.
            releaseTime = StringUtils.isBlank(releaseTime) ? washContent("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", httpPage.getRawText()) : releaseTime;
            // Fallback 2: weibo-style day text prefixed with the current year.
            releaseTime = StringUtils.isBlank(releaseTime) ? LocalDate.now().getYear() + httpPage.getHtml().xpath("//span[@class=\"weibo_time_day\"]/text()").get() : releaseTime;
            if (releaseTime.contains("日期：")) {
                // Strip the "date:" label prefix.
                releaseTime = releaseTime.split("：")[1];
            }
            if(releaseTime.contains("发表于")){
                // Strip the "published at" prefix.
                releaseTime = releaseTime.split("于")[1].trim();
            }
            try {
                Long releaseTimeToLong = DateUtils.parseDate(releaseTime.trim(), "yyyy-MM-dd HH:mm:ss", "yyyy年MM月dd日 HH:mm", "yyyy-MM-dd HH:mm", "yyyyMM月dd日", "yyyy-MM-dd","yyyy/MM/dd HH:mm").getTime();
                if(!isDateRange(crawlerRequestRecord,releaseTimeToLong)){
                    // Outside the configured crawl window: drop the page.
                    return;
                }
                crawlerRequestRecord.setReleaseTime(releaseTimeToLong);
                crawlerRequestRecord.setNeedWashPage(true);
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }

        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        String url = "";
        // Non-blog pages: dig channel/newsid out of the embedded JS/attributes
        // to build the comment-service URL. Several page generations are handled.
        if (!requestUrl.matches(BlOG_ITEM_URL)) {
            String channel = washContent("channel:[\\ ]*'[a-zA-Z0-9]+'", httpPage.getRawText());
            channel = StringUtils.isBlank(channel) ? washContent("\"channel\":\"[a-z]+\"", httpPage.getRawText()) : channel;
            String id = washContent("newsid:[\\ ]*'[a-zA-Z0-9\\-]+'", httpPage.getRawText());
            id = StringUtils.isBlank(id) ? washContent("newsid=\"[a-zA-Z0-9\\-]+\"", httpPage.getRawText()) : id;
            if (requestUrl.matches(ITEM_K_URL) || requestUrl.matches(ITEM_K_URL_TWO)) {
                // "k" pages carry a combined "channel:id" token in cmntId.
                String content = washContent("\"cmntId\":\"[a-z]+:[a-zA-Z0-9\\-]+", httpPage.getRawText());
                content = StringUtils.isNotBlank(content) ? content.substring(content.lastIndexOf("\"") + 1) : washContent("cmntId:[\\ ]*'[a-z]+:[a-zA-Z0-9\\-]+", httpPage.getRawText());
                if (StringUtils.isNotBlank(content)) {
                    if (content.contains("'")) {
                        content = content.substring(content.lastIndexOf("\'") + 1);
                    }
                    String[] split = content.split(":");
                    channel = split[0];
                    id = split[1];
                } else {
                    // No cmntId found: blank the channel so the guard below bails out.
                    channel = "";
                }
            } else if (requestUrl.matches(ITEM_VIDEO_URL)) {
                channel = washContent("channel:[\"\\ \']*[a-z]*[\"\']*", httpPage.getRawText());
                id = washContent(" newsid:[\"\\ \']*[a-zA-Z0-9\\-]*[\"\']*", httpPage.getRawText());
            }
            if (StringUtils.isBlank(channel) || StringUtils.isBlank(id)) {
                // Without both values no comment/interaction URL can be built;
                // drop the labels so these data types are not retried.
                crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().removeLabelTag("interaction");
                crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().removeLabelTag("comment");
                log.error("channel or id can not null !");
                return;
            }
            // Strip the surrounding quotes the regexes captured.
            if (id.contains("'")) {
                id = id.substring(id.indexOf("'") + 1, id.lastIndexOf("'"));
            } else if (id.contains("\"")) {
                id = id.substring(id.indexOf("\"") + 1, id.lastIndexOf("\""));
            }
            if (channel.contains("'")) {
                channel = channel.substring(channel.indexOf("'") + 1, channel.lastIndexOf("'"));
            } else if (channel.contains("\"") && channel.contains(":")) {
                String[] split = channel.split(":");
                channel = split[1].substring(split[1].indexOf("\"") + 1, split[1].lastIndexOf("\""));
            }

            // Pick the comment-service endpoint matching the page generation.
            if (requestUrl.matches(ITEM_K_URL) || requestUrl.matches(ITEM_K_URL_TWO)) {
                url = String.format(COMMENT_K_SOURCE_URL, channel, id);
            } else if (requestUrl.matches(ITEM_VIDEO_URL)) {
                url = String.format(COMMENT_VIDEO_SOURCE_URL, channel, id);
            } else {
                url = String.format(COMMENT_SOURCE_URL, channel, id);
            }
        }

        // For blog pages the uid/article id is encoded in the last URL segment.
        String substring = requestUrl.substring(requestUrl.lastIndexOf("_") + 1, requestUrl.lastIndexOf("."));
        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
            crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().removeLabelTag("interaction");
            String interactionUrl = url;
            if (requestUrl.matches(BlOG_ITEM_URL)) {
                // Blog segment is "<uid>010<aid>"; split it back apart.
                String[] split = substring.split("010");
                interactionUrl = "http://comet.blog.sina.com.cn/api?maintype=num&uid=" + split[0] + "&aids=0" + split[1];
            }

            if (StringUtils.isBlank(interactionUrl)) {
                return;
            }

            CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(interactionUrl)
                    .recordKey(interactionUrl + "interaction")
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .copyBizTags()
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                    .build();
            interactionRecord.getHttpRequest().addExtra("articleUrl", requestUrl);
            if (interactionUrl.matches(COMMENT_K_URL)) {
                // The "k" comment service requires a referer header.
                interactionRecord.getHttpRequest().addHeader("referer", requestUrl);
            }
            parsedLinks.add(interactionRecord);
        }

        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
            crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().removeLabelTag("comment");
            CrawlerRecord commentFilter = null;
            // The comment filter record is carried on the category tag by the caller.
            if ((commentFilter = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class)) == null) {
                log.error("comment filter can not null!");
                return;
            }

            String commentUrl = url;
            if (requestUrl.matches(BlOG_ITEM_URL)) {
                commentUrl = "http://comment5.news.sina.com.cn/page/info?channel=blog&newsid=" + substring + "&page_size=50&oe=utf-8&score=&fake=1&thread=1&list=desc&page=1";
            }

            if (StringUtils.isEmpty(commentUrl)) {
                return;
            }

            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .recordKey(commentUrl + "comment")
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .notFilterRecord()
                    .needWashed(true)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                    .build();
            // Comments inherit the dedicated comment filter, not the article's.
            commentRecord.setFilter(commentFilter.getFilter());
            commentRecord.setFilterInfos(commentFilter.getFilterInfos());
            commentRecord.getHttpRequest().addExtra("articleUrl", requestUrl);
            if (commentUrl.matches(COMMENT_K_URL)) {
                commentRecord.getHttpRequest().addHeader("referer", requestUrl);
            }
            parsedLinks.add(commentRecord);
        }
    }

    /**
     * Handles a comment-feed JSON response: schedules a retry when the payload
     * is broken or the service reports an error message, then (if the record is
     * tagged for comments) queues the next comment page with the "page" query
     * parameter incremented by one.
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        // Comment paging: a readable status msg means the fetch failed upstream.
        String msg = "";
        try {
            msg = httpPage.getJson().jsonPath($_type + ".result.status.msg").get();
        } catch (Exception e) {
            // FIX: keep the stack trace — the bare message made failures hard to diagnose.
            log.error("comment json parse fail, will retry!", e);
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        if (StringUtils.isNotEmpty(msg)) {
            log.error("comment need to retry!");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        boolean comment = crawlerRequestRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.comment);
        if (!comment) {
            return;
        }

        // Decide whether to turn the page: rebuild the query with page + 1.
        String[] split = requestUrl.split("\\?");
        // FIX: a URL without a query string previously threw ArrayIndexOutOfBoundsException.
        if (split.length < 2) {
            return;
        }
        String commentUrl = split[0] + "?";
        int pageSize = 0;
        int page = 1;
        // FIX: reuse the already-split query instead of splitting requestUrl again.
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair nameValuePair : parse) {
            if ("page_size".equals(nameValuePair.getName())) {
                pageSize = Integer.parseInt(nameValuePair.getValue());
                commentUrl = commentUrl + nameValuePair.getName() + "=" + pageSize + "&";
            } else if ("page".equals(nameValuePair.getName())) {
                page = Integer.parseInt(nameValuePair.getValue());
                commentUrl = commentUrl + nameValuePair.getName() + "=" + (page + 1) + "&";
            } else {
                commentUrl = commentUrl + nameValuePair.getName() + "=" + nameValuePair.getValue() + "&";
            }
        }
        commentUrl = commentUrl.substring(0, commentUrl.length() - 1); // drop trailing '&'

        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                .needWashed(true)
                .copyResultTags()
                .build();
        // Carry the extras (e.g. articleUrl) forward to the next comment page.
        commentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(commentRecord);
    }

    /**
     * Dispatches a downloaded page to the wash routine matching each data type
     * tagged on the record (article / interaction / comment), choosing the
     * blog-specific variant when the URL matches a blog pattern.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> results = new ArrayList<>();
        CrawlerResultTags tags = crawlerRecord.tagsCreator().resultTags();
        String pageUrl = page.getRequest().getUrl();

        if (tags.hasDataType(CrawlerEnum.CrawlerDataType.article)) {
            if (pageUrl.matches(BlOG_ITEM_URL)) {
                results.add(washBlogArticle(crawlerRecord, page));
            } else if (pageUrl.matches(ITEM_MED_URL)) {
                results.add(washMedArticle(crawlerRecord, page));
            } else {
                results.addAll(washArticle(crawlerRecord, page));
            }
        }

        if (tags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
            results.add(pageUrl.matches(BLOG_INTERACTION_URL)
                    ? washBlogInteraction(crawlerRecord, page)
                    : washInteraction(crawlerRecord, page));
        }

        if (tags.hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
            results.addAll(pageUrl.matches(BLOG_COMMENT_URL)
                    ? washBlogComment(crawlerRecord, page)
                    : washComment(crawlerRecord, page));
        }

        return results;
    }

    /**
     * Washes a Sina article page into crawler results: one "article" record
     * (flows to kafka) and one companion "itemUrl" record (flows to mysql).
     *
     * @param requestRecord the originating request (supplies domain, fallback release time)
     * @param httpPage      the downloaded article page
     * @return the washed records; empty if the release time could not be parsed
     */
    public List<CrawlerData> washArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        // The article key is the unique id embedded in the URL; delimiters differ
        // per URL pattern, hence the per-pattern extraction below.
        String articleKey = "";
        if (itemUrl.matches(CAR_ITEM_URL) || itemUrl.matches(ITEM_K_URL)) {
            articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));
        } else if (itemUrl.matches(ITEM_URL)) {
            articleKey = itemUrl.substring(itemUrl.lastIndexOf("-") + 1, itemUrl.lastIndexOf("."));
        } else if (itemUrl.matches(ITEM_URL_TWO)) {
            articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);
        } else if (itemUrl.matches(ITEM_K_URL_TWO) || itemUrl.matches(ITEM_VIDEO_URL)) {
            articleKey = itemUrl.substring(itemUrl.lastIndexOf("-") + 1, itemUrl.lastIndexOf(".d"));
        } else if (itemUrl.matches(ITEM_STOCK_URL)) {
            String[] split = itemUrl.split("/");
            articleKey = split[split.length - 2];
        }

        String title = httpPage.getHtml().xpath("//h1/text()|//meta[@property=\"og:title\"]/@content|//h2[@title]/text()").get();
        String author = httpPage.getHtml().xpath("//span[contains(@class,'source')]/a/text()|//span[contains(@class,'source')]/text()|//span[contains(@id,'source')]|//span[@class=\"media_name\"]/text()|//a[contains(@class,'source')]/text()|//div[@class=\"txtdetail\"]/a/text()|//h2[@class=\"weibo_user\"]/text()").get();
        String source = httpPage.getHtml().xpath("//span[text()='来源：']/a/text()").get();
        String releaseTime = httpPage.getHtml().xpath("//span[contains(@class,'date')]/text()|//p[@class=\"source-time\"]/span[1]/text()|//span[text()='发布时间：']/em/text()").get();
        // Fall back to scraping a full timestamp out of the raw HTML when no
        // date element matched.
        releaseTime = StringUtils.isBlank(releaseTime) ? washContent("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", httpPage.getRawText()) : releaseTime;
        if (itemUrl.matches(ITEM_STOCK_URL)) {
            // Research-report pages carry author/source/date in labelled spans.
            List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"creab\"]//span").nodes();
            for (Selectable node : nodes) {
                String text = node.xpath("./text()").get();
                if (text.contains("研究员：")) {
                    author = node.xpath("./a/text()").get();
                }

                if (text.contains("机构：")) {
                    source = node.xpath("./a/text()").get();
                }

                if (text.contains("日期：")) {
                    releaseTime = text;
                }
            }
        }
        // Weibo-style pages only carry a month-day timestamp; prefix the current
        // year so it becomes parseable.
        String time = httpPage.getHtml().xpath("//span[@class=\"weibo_time_day\"]/text()").get();
        if (StringUtils.isNotBlank(time) && StringUtils.isBlank(releaseTime)) {
            releaseTime = LocalDate.now().getYear() + time;
        }
        // FIX: previously a missing weibo timestamp reset releaseTime to "" for
        // every non-weibo page, throwing away the value extracted above (and
        // making the "日期：" branch below dead for stock pages). Keep what we
        // have; only normalise null to "" so the contains() checks are safe.
        if (releaseTime == null) {
            releaseTime = "";
        }
        if (releaseTime.contains("日期：")) {
            releaseTime = releaseTime.split("：")[1];
        }
        if (releaseTime.contains("发表于")) {
            releaseTime = releaseTime.split("于")[1].trim();
        }

        List<String> all = httpPage.getHtml().xpath("//div[@id=\"artibody\"]/p//text()|//div[@id=\"article\"]/p//text()|//div[@id=\"artibody\"]/div/p//text()|//div[@id=\"artibody\"]/div/font//text()|//div[@id=\"artibody\"]/text()|//div[@class=\"mainBody\"]/p//text()" +
                "|//section[@class=\"art_pic_card art_content\"]/p//text()|//div[@class=\"blk_container\"]//text()|//em[@task=\"oldinfor\"]//text()|//div[@class=\"xj_module\"]/p//text()").all();
        StringBuilder content = new StringBuilder();
        for (String data : all) {
            if (StringUtils.isBlank(data)) {
                continue;
            }
            // Skip embedded video placeholders mixed into the body text.
            if (data.contains("video")) {
                continue;
            }
            content.append(data).append(" ");
        }
        List<String> keywordsList = httpPage.getHtml().xpath("//div[@id=\"keywords\"]/a/text()|//div[@class=\"animation-opacity\"]//div[@class=\"ul\"]/a/text()|//div[@class=\"keywords\"]/a/text()|//p[@class=\"tags\"]/a/text()").all();

        try {
            // NOTE(review): pattern "yyyyMM月dd日" looks like it is missing "年" —
            // confirm against real page timestamps.
            Long releaseTimeToLong = StringUtils.isBlank(releaseTime) ? requestRecord.getReleaseTime() : DateUtils.parseDate(releaseTime.trim(), "yyyy-MM-dd HH:mm:ss", "yyyy年MM月dd日 HH:mm", "yyyy-MM-dd HH:mm", "yyyyMM月dd日", "yyyy-MM-dd","yyyy/MM/dd HH:mm").getTime();
            CrawlerData article = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .releaseTime(releaseTimeToLong)
                    .url(itemUrl)
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Source, source)
                    .addContentKV(Field_Content, content.toString().trim())
                    .flowInPipelineTag("kafka")
                    .build();
            if (keywordsList != null && keywordsList.size() > 0) {
                article.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, keywordsList);
            }
            article.setFilterPipelineResult(true);
            crawlerDataList.add(article);

            // Companion record carrying just the URL + timestamp for the mysql sink.
            CrawlerData itemUrlData = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(),"itemUrl", articleKey))
                    .releaseTime(releaseTimeToLong)
                    .url(itemUrl)
                    .addContentKV("itemUrl", itemUrl)
                    .addContentKV("releaseTimeToLong", String.valueOf(releaseTimeToLong))
                    .flowInPipelineTag("mysql")
                    .build();
            itemUrlData.setFilterPipelineResult(true);
            crawlerDataList.add(itemUrlData);

        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return crawlerDataList;
    }

    /**
     * Washes the comment-count JSON endpoint response into an "interaction"
     * record (comment total) attached to its parent article.
     *
     * @param requestRecord the originating request; its extras carry the article URL
     * @param httpPage      the downloaded count endpoint response
     * @return the interaction record, or null if the release time failed to parse
     */
    private CrawlerData washInteraction(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String requestUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) requestRecord.getHttpRequest().getExtras().get("articleUrl");

        // Derive the parent article key; the delimiters differ per URL pattern.
        String key = "";
        if (articleUrl.matches(CAR_ITEM_URL) || articleUrl.matches(ITEM_K_URL)) {
            key = articleUrl.substring(articleUrl.lastIndexOf("_") + 1, articleUrl.lastIndexOf("."));
        } else if (articleUrl.matches(ITEM_URL)) {
            key = articleUrl.substring(articleUrl.lastIndexOf("-") + 1, articleUrl.lastIndexOf("."));
        } else if (articleUrl.matches(ITEM_URL_TWO)) {
            key = articleUrl.substring(articleUrl.lastIndexOf("/") + 1);
        } else if (articleUrl.matches(ITEM_K_URL_TWO) || articleUrl.matches(ITEM_VIDEO_URL)) {
            key = articleUrl.substring(articleUrl.lastIndexOf("-") + 1, articleUrl.lastIndexOf(".d"));
        }

        CrawlerData interaction = null;
        try {
            Json json = httpPage.getJson();
            String comments = json.jsonPath($_type + ".result.count.show").get();
            String releaseTime = json.jsonPath($_type + ".result.news.time").get();

            long releaseTimeToLong;
            if (StringUtils.isEmpty(releaseTime)) {
                // Endpoint carried no timestamp — reuse the request's.
                releaseTimeToLong = requestRecord.getReleaseTime();
            } else {
                // Strip a leading "发表于" prefix before parsing.
                if (releaseTime.contains("发表于")) {
                    releaseTime = releaseTime.substring(releaseTime.indexOf("于") + 1).trim();
                }
                releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss", "yyyy/MM/dd HH:mm", "yyyyMM月dd日HH:mm").getTime();
            }

            interaction = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), key))
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), key))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                    .url(requestUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_I_Comments, comments)
                    .flowInPipelineTag("kafka")
                    .build();
            interaction.setFilterPipelineResult(true);
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return interaction;
    }

    /**
     * Washes the comment-list JSON endpoint into crawler results: per entry one
     * "comment" record and one companion per-comment "interaction" record (likes).
     *
     * @param requestRecord the originating request; its extras carry the article URL
     * @param httpPage      the downloaded comment-list response
     * @return the washed records; entries with unparseable timestamps are skipped
     */
    private List<CrawlerData> washComment(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        List<CrawlerData> comments = new ArrayList<>();

        String itemUrl = httpPage.getRequest().getUrl();
        Json json = httpPage.getJson();
        String url = (String) requestRecord.getHttpRequest().getExtras().get("articleUrl");
        // Derive the parent article key from the originating article URL.
        // NOTE(review): ITEM_VIDEO_URL is grouped with ITEM_URL_TWO here, but with
        // ITEM_K_URL_TWO in washArticle/washInteraction — confirm which keying is
        // intended for video pages, otherwise comment parentIds won't match.
        String articleKey = "";
        if (url.matches(CAR_ITEM_URL) || url.matches(ITEM_K_URL)) {
            articleKey = url.substring(url.lastIndexOf("_") + 1, url.lastIndexOf("."));
        } else if (url.matches(ITEM_URL)) {
            articleKey = url.substring(url.lastIndexOf("-") + 1, url.lastIndexOf("."));
        } else if (url.matches(ITEM_URL_TWO) || url.matches(ITEM_VIDEO_URL)) {
            articleKey = url.substring(url.lastIndexOf("/") + 1);
        } else if (url.matches(ITEM_K_URL_TWO)) {
            articleKey = url.substring(url.lastIndexOf("-") + 1, url.lastIndexOf(".d"));
        }

        List<String> commentList = json.jsonPath($_type + ".result.cmntlist").all();
        for (String comment : commentList) {
            try {
                JSONObject jsonObject = JSONObject.parseObject(comment);
                // NOTE(review): "uid" is the commenting user's id — two comments by
                // the same user collide on dataId; verify whether a per-comment id
                // field is available in this payload.
                String commentId = jsonObject.getString("uid");
                String area = jsonObject.getString("area");
                String author = jsonObject.getString("nick");
                String content = jsonObject.getString("content");
                String releaseTime = jsonObject.getString("time");

                CrawlerData commentData = CrawlerData.builder()
                        .data(requestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment.enumVal(), commentId))
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("comment"))
                        .url(itemUrl)
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Content, content)
//                        .addContentKV("area",area)
                        .flowInPipelineTag("kafka")
                        .build();
                commentData.setFilterPipelineResult(true);
                comments.add(commentData);

                // Companion interaction record: the comment's like count.
                String likes = jsonObject.getString("agree");
                CrawlerData commentInteractionData = CrawlerData.builder()
                        .data(requestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment.enumVal(), commentId))
                        .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment.enumVal(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), commentId))
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                        .url(itemUrl)
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_I_Likes, likes)
                        .flowInPipelineTag("kafka")
                        .build();
                commentInteractionData.setFilterPipelineResult(true);
                comments.add(commentInteractionData);
            } catch (ParseException e) {
                // FIX: the catch used to wrap the whole loop, so one comment with an
                // unparseable timestamp discarded every remaining comment. Now only
                // the bad entry is skipped.
                log.error(e.getMessage());
            }
        }
        return comments;
    }

    /**
     * Washes a blog.sina.com.cn post page into an "article" record.
     *
     * @param requestRecord the originating request (fallback release time)
     * @param httpPage      the downloaded blog page
     * @return the article record, or null if the release time failed to parse
     */
    private CrawlerData washBlogArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));

        String title = httpPage.getHtml().xpath("//h1[@class=\"h1_tit\"]|//h2").get();
//        String author = httpPage.getHtml().xpath("//strong[@id=\"ownernick\"]/text()|//span[@id=\"ownernick\"]/text()").get();
        // The author is the middle, '_'-delimited segment of the page <title>.
        String author = httpPage.getHtml().xpath("//title").get();
        // FIX: only slice when the title has at least two '_' — substring(i+1, j)
        // used to throw StringIndexOutOfBoundsException (uncaught) otherwise.
        author = StringUtils.isNotBlank(author) && author.indexOf("_") < author.lastIndexOf("_")
                ? author.substring(author.indexOf("_") + 1, author.lastIndexOf("_")) : "";
        List<String> all = httpPage.getHtml().xpath("//div[@id=\"sina_keyword_ad_area2\"]//text()").all();
        String releaseTime = httpPage.getHtml().xpath("//span[@class=\"time SG_txtc\"]/text()").get();

        StringBuilder content = new StringBuilder();
        for (String data : all) {
            if (StringUtils.isEmpty(data)) {
                continue;
            }
            content.append(data).append(" ");
        }

        // Blog tags become keyword business tags on the request.
        List<String> keywordsList = httpPage.getHtml().xpath("//td[@class=\"blog_tag\"]/h3//text()|//span[@class=\"BNE_lkC lk_a\"]/text()").all();
        for (String tag : keywordsList) {
            requestRecord.tagsCreator().bizTags().addKeywords(tag);
        }

        CrawlerData article = null;
        try {
            // FIX: fall back to the request's release time when the page carries no
            // timestamp — releaseTime.trim() used to NPE (uncaught, as NPE is not a
            // ParseException) on such pages. Mirrors washArticle/washMedArticle.
            Long releaseTimeToLong = StringUtils.isBlank(releaseTime)
                    ? requestRecord.getReleaseTime()
                    : DateUtils.parseDate(releaseTime.trim(), "(yyyy-MM-dd HH:mm:ss)", "yyyy-MM-dd HH:mm:ss").getTime();
            article = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .releaseTime(releaseTimeToLong)
                    .url(itemUrl)
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Content, content.toString().trim())
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return article;
    }

    /**
     * Washes the blog interaction-counter response (single-letter JSON keys)
     * into an "interaction" record tied to the originating article.
     *
     * @param requestRecord the originating request
     * @param httpPage      the counter endpoint response; extras carry the article URL
     * @return the interaction record, or null if the payload could not be parsed
     */
    private CrawlerData washBlogInteraction(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("_") + 1, articleUrl.lastIndexOf("."));

        String rawText = httpPage.getRawText();
        try {
            // NOTE(review): lastIndexOf("{") + indexOf("}") assumes exactly one flat
            // JSON object in the payload; nested braces would break the slice (the
            // failure lands in the catch below).
            String jsonSlice = rawText.substring(rawText.lastIndexOf("{"), rawText.indexOf("}") + 1);
            JSONObject counters = JSONObject.parseObject(jsonSlice);

            // Single-letter counter keys: f=collection, d=likes, r=views,
            // z=forwards, c=comments.
            String collection = counters.getString("f");
            String likes = counters.getString("d");
            String views = counters.getString("r");
            String forwards = counters.getString("z");
            String comments = counters.getString("c");

            return CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                    .releaseTime(requestRecord.getReleaseTime())
                    .url(itemUrl)
                    .addContentKV(Field_I_Collection, collection)
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Views, views)
                    .addContentKV(Field_I_Forwards, forwards)
                    .addContentKV(Field_I_Comments, comments)
                    .build();
        } catch (Exception e) {
            log.error("page(“" + itemUrl + "”) can not get interaction！");
        }
        return null;
    }

    /**
     * Washes a blog comment-list JSON response into "comment" records plus a
     * per-comment "interaction" record carrying its reply count.
     *
     * @param requestRecord the originating request
     * @param httpPage      the downloaded comment-list response
     * @return the washed records (entries with unparseable timestamps are skipped)
     */
    public List<CrawlerData> washBlogComment(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        List<CrawlerData> commentList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleUrl = httpPage.getJson().jsonPath($_type + ".result.news.url").get();
        // Parent article key: the segment between the last '_' and the extension.
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("_") + 1, articleUrl.lastIndexOf("."));

        // Top-level comments live in result.cmntlist; threaded replies are nested
        // under result.threaddict as {threadId: {list: [...]}} — flatten both into
        // one list before washing.
        List<String> all = httpPage.getJson().jsonPath($_type + ".result.cmntlist").all();
        List<String> threaddicts = httpPage.getJson().jsonPath($_type + ".result.threaddict").all();
        for (String threaddict : threaddicts) {
            Map entry = new Json(threaddict).toObject(Map.class);
            Set set = entry.keySet();
            for (Object key : set) {
                all.addAll(new Json(String.valueOf(entry.get(key))).jsonPath($_type + ".list").all());
            }
        }
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            // NOTE(review): "against" (down-vote count?) is used as the comment's
            // unique key in both dataIds below — looks collision-prone; confirm a
            // real per-comment id field is not available in this payload.
            String against = jsonObject.getString("against");
            String author = jsonObject.getString("nick");
            String content = jsonObject.getString("content");
            String area = jsonObject.getString("area"); // extracted but currently unused
            String releaseTime = jsonObject.getString("time");

            CrawlerData comment = null;
            try {
                comment = CrawlerData.builder()
                        .data(requestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment.enumVal(), against))
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("comment"))
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                        .url(itemUrl)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Content, content)
                        .build();

                commentList.add(comment);
                // Reply count lives in optional content_ext.reply; default to "0"
                // when absent (best-effort — any failure keeps the default).
                String comments = "0";
                try {
                    comments = jsonObject.getJSONObject("content_ext").getString("reply");
                } catch (Exception e) {
                    comments = "0";
                }

                // NOTE(review): this interaction dataId omits the comment enumVal
                // prefix that washComment uses — confirm the two id schemes are
                // intentionally different.
                CrawlerData commentData = CrawlerData.builder()
                        .data(requestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment.enumVal(), against))
                        .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), against))
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                        .url(itemUrl)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_I_Comments, comments)
                        .build();
                commentList.add(commentData);
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }
        return commentList;
    }

    /**
     * Washes a Sina health/medical article page into an "article" record.
     *
     * @param requestRecord the originating request (fallback release time)
     * @param httpPage      the downloaded article page
     * @return the article record, or null if the release time failed to parse
     */
    private CrawlerData washMedArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));

        String title = httpPage.getHtml().xpath("//h1/text()").get();
        String author = "";
        String source = "";
        // Author and source share the same span; they are distinguished by the
        // "来源：" / "作者：" label prefixes in the text nodes.
        List<Selectable> nodes = httpPage.getHtml().xpath("//span[contains(@class,'wz-zuthorname')]").nodes();
        for (Selectable node : nodes) {
            List<String> all = node.xpath(".//text()").all();
            for (String data : all) {
                if (data.contains("来源：")) {
                    // Try progressively less specific locations for the source text.
                    source = node.xpath("./em/a/text()").get();
                    if (StringUtils.isBlank(source)) {
                        source = node.xpath("./em/text()").get();
                        if (StringUtils.isBlank(source)) {
                            source = node.xpath("./text()").get();
                            source = StringUtils.isNotBlank(source) ? source.split("：")[1] : "";
                        }
                    }
                }
            }

            // NOTE(review): contains() on the List does an exact-element match for
            // "作者：" (unlike the substring contains used for "来源：" above), and
            // split("：")[1] throws if the text has nothing after the colon —
            // confirm against real pages.
            if (all.contains("作者：")) {
                author = node.xpath("./text()").get().split("：")[1];
            }
        }

        String releaseTime = httpPage.getHtml().xpath("//span[@class=\"wz-fbtime\"]/text()").get();
        // Med pages render the timestamp in English Date.toString() form,
        // e.g. "Thu Feb 04 11:47:30 CST 2021".
        DateFormat df = new SimpleDateFormat("EEE MMM dd HH:mm:ss z yyyy", Locale.ENGLISH);

        List<String> all = httpPage.getHtml().xpath("//div[@class=\"textbox\"]//text()").all();
        StringBuffer content = new StringBuffer();
        for (String data : all) {
            // Drop blanks and the trailing "*声明：" disclaimer paragraph.
            if (StringUtils.isBlank(data) || (StringUtils.isNotBlank(data) && data.contains("*声明："))) {
                continue;
            }
            content.append(data).append(" ");
        }
        // Page tags become topic-type business tags on the request.
        List<String> keywordsList = httpPage.getHtml().xpath("//div[@class=\"biaoqian\"]/a/text()").all();
        if (keywordsList != null && keywordsList.size() > 0) {
            requestRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, keywordsList);
        }

        CrawlerData article = null;
        try {
            // Full-width spaces are stripped before parsing; fall back to the
            // request's release time when the page has none.
            Long releseTimeToLong = StringUtils.isNotBlank(releaseTime) ? df.parse(releaseTime.replace("　", "")).getTime() : requestRecord.getReleaseTime();
            article = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .releaseTime(releseTimeToLong)
                    .url(itemUrl)
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Content, content.toString().trim())
                    .addContentKV(Field_Source, source)
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return article;
    }

    /**
     * Expands a keyword-feed response into one search request per keyword and
     * appends the new records to {@code crawlerRecords}.
     *
     * @param requestRecord       the request whose extras carry the search URL template
     * @param supportSourceRecord the record holding the downloaded keyword feed
     * @param crawlerRecords      output list the generated requests are added to
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage keywordPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        if (extras == null) {
            return;
        }

        String searchKwSourceUrl = (String) extras.get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchKwSourceUrl)) {
            log.error("searchKw source url can not is null!");
            return;
        }

        Json json = keywordPage.getJson();
        if (!"success".equals(json.jsonPath($_type + ".msg").get())) {
            log.error("keyword page [{}] download error!", keywordPage.getRequest().getUrl());
            return;
        }

        for (String entry : json.jsonPath($_type + ".content").all()) {
            String keyword = JSONObject.parseObject(entry).getString("keyword");
            try {
                // Substitute the URL-encoded keyword into the search URL template.
                String searchUrl = String.format(searchKwSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(requestRecord)
                        .httpUrl(searchUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                // Keyword-search URLs get a keyword tag; other templates get a
                // site-info tag and inherit the originating extras.
                if (searchUrl.matches(SEARCH_KW_URL)) {
                    turnRecord.tagsCreator().bizTags().addKeywords(keyword);
                } else {
                    turnRecord.tagsCreator().bizTags().addCustomKV(Tag_Site_Info, keyword);
                    turnRecord.getHttpRequest().setExtras(requestRecord.getHttpRequest().getExtras());
                }
                crawlerRecords.add(turnRecord);
            } catch (UnsupportedEncodingException e) {
                log.error(e.getMessage());
            }
        }
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // Intentionally empty: this script needs no post-execution processing.
    }

    /**
     * Re-queues a failed request for another download attempt, giving up after
     * 5 retries. The retry count travels with the record in the
     * REQUEST_AGAIN_TAG business tag.
     *
     * @param crawlerRequestRecords output list the retry record is added to
     * @param crawlerRecord         the record whose download failed
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags bizTags = crawlerRecord.tagsCreator().bizTags();
        int retries = 0;
        if (bizTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            retries = (int) bizTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (retries >= 5) {
                log.error("sina download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // Rebuild turn-page requests as turn-page, item requests as item; the
        // presence of the "turn_page_item_request" label tag tells them apart.
        LabelTag itemTag = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord retryRecord;
        if (itemTag == null) {
            retryRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + retries)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            retryRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + retries)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // Carry over the original request's processing flags, headers and extras,
        // then bump the retry counter.
        retryRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        retryRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        retryRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        retryRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        retryRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++retries);
        crawlerRequestRecords.add(retryRecord);
    }

    /**
     * Returns the first substring of {@code input} matching {@code regx}, or
     * {@code null} when there is no match.
     *
     * @param regx  regular expression to search for
     * @param input text to search in
     * @return the first match, or null if none
     */
    static String washContent(String regx, String input) {
        // Simplified: the old while-loop returned on its first iteration, so it
        // was an if in disguise. Widened from private to package-private (a
        // backward-compatible change) so the helper can be unit-tested.
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group(0) : null;
    }

    /**
     * Ad-hoc manual check: verifies that a research-report id can be pulled out
     * of a report URL as the second-to-last path segment.
     */
    public static void main(String[] args) {
        String url = "http://stock.finance.sina.com.cn/stock/go.php/vReport_Show/kind/lastest/rptid/668871858744/index.phtml";
        String[] segments = url.split("/");
        System.out.println(segments[segments.length - 2]);
    }
}
