package com.chance.cc.crawler.development.scripts.bitauto.article;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpConfig;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.downloader.http.HttpClientDownloader;
import com.chance.cc.crawler.core.downloader.http.HttpClientGenerator;
import com.chance.cc.crawler.core.downloader.proxy.SiteConfigProxyProvider;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.KVTag;
import com.google.common.collect.Maps;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.DigestUtils;

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.text.ParseException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Series;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Topic_Type;

/**
 * @author lt
 * @version 1.0
 * @date 2020-12-07 12:04:42
 * @email okprog@sina.com
 */
public class BitAutoArticleCrawlerScript extends CrawlerCommonScript {

    private Logger logger = LoggerFactory.getLogger(BitAutoArticleCrawlerScript.class);

    // URL patterns: used both for routing downloaded pages in parseLinks and
    // for registration via initUrlRegulars.
    // News home page.
    private static final String indexRegex = "https?://news\\.yiche\\.com/";
    // Category landing page, e.g. https://news.yiche.com/pingce/
    private static final String cateUrlRegex = "https?://news\\.yiche\\.com/\\w*/";
    // Paginated category list, e.g. .../?pageindex=3
    private static final String listUrlRegex = "https?://news\\.yiche\\.com/\\S*/\\?pageindex=\\d*";
    // Article detail page: /<category>/<yyyymmdd>/<id>.html
    private static final String articleUrlRegex = "https?://news\\.yiche\\.com/\\w*/\\d{8}/\\d*\\.html";
    // Legacy comment API (handled by parseCommentLinks).
    private static final String commentUrlRegex = "https?://newsapi\\.yiche\\.com/comment/comment/getdata\\S*";
    private static final String likesUrlRegex = "https://news\\.yiche\\.com/web_api/information_api/api/v1/support/support_info\\S*";
    private static final String viewsUrlRegex = "https://newsapi\\.yiche\\.com/promotion-api/traffic/news/pv-total\\?ids=\\d*";
    // Keyword (car-series key) feed consumed by prepareRequest.
    private static final String keysRegex = "https?://\\S*v1/meta/bitauto/keys\\S*";
    // Per-series original-news list pages (handled by parseSeriesListLinks).
    private static final String seriesNewsListUrlRegex = "https://car\\.(yiche|bitauto)\\.com/newcar/\\S*/yuanchuang/\\d*/";

    // top_comments API (handled by getTurnComment).
    private static final String commentUrl = "https://news.yiche.com/web_api/information_api/api/v1/comment/top_comments\\S*";

    // Format templates matching the patterns above.
    private static final String seriesNewsListUrlFormat = "https://car.%s.com/newcar/%s/yuanchuang/%s/";
    private static final String commentUrlFormat = "https://newsapi.yiche.com/comment/comment/getdata?productId=%s&objectId=%s&pageIndex=%s&pageSize=%s&isHot=false&_=%s";
    private static final String likesUrlFormat = "https://news.yiche.com/web_api/information_api/api/v1/support/support_info?cid=508&param=%7B%22contentId%22%3A%22#productId%22%2C%22contentType%22%3A%22#contentType%22%7D";
    private static final String viewsUrlFormat = "https://newsapi.yiche.com/promotion-api/traffic/news/pv-total?ids=%s";
    // NOTE(review): mutable static, reassigned from the index page in
    // prepareRequest ("yiche" vs "bitauto") — not thread-safe if scripts run concurrently.
    private static String nowDomain = "bitauto";

    // Site tag this script accepts (see crawlerCheck).
    private static final String scriptSite = "article";

    /**
     * Identifies the crawler domain this script belongs to.
     *
     * @return the domain key, always {@code "bitauto"}
     */
    @Override
    public String domain() {
        final String domainKey = "bitauto";
        return domainKey;
    }

    /**
     * Registers every URL pattern this script can handle, in the same order
     * the original registration calls used.
     */
    @Override
    public void initUrlRegulars() {
        final String[] regulars = {
                commentUrl,
                indexRegex,
                cateUrlRegex,
                listUrlRegex,
                articleUrlRegex,
                commentUrlRegex,
                keysRegex,
                seriesNewsListUrlRegex,
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Decides whether this script should process the given request record.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return true when the record's biz "site" tag equals {@code scriptSite}
     *         (case-insensitive); false otherwise, including when the tag is absent
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String crawlerSite = crawlerRequestRecord.tagsCreator().bizTags().site();
        // Compare from the constant side so a null/missing site tag returns
        // false instead of throwing a NullPointerException.
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /**
     * Builds series-news list requests from the keyword support records.
     * <p>
     * Expects {@code supportSourceRecords[0]} to be the keys API response
     * (matching {@code keysRegex}) and {@code supportSourceRecords[1]} to be
     * the downloaded index page, which is used to detect the currently live
     * domain (yiche vs bitauto). Falls back to the default preparation when no
     * keyword-derived request could be produced.
     *
     * @param requestRecord        the originating request record
     * @param supportSourceRecords pre-downloaded support records, may be null/empty
     * @return one turn-page request per keyword, or the superclass default
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> allItemRecords = new ArrayList<>();
        if (supportSourceRecords == null || supportSourceRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        CrawlerRequestRecord keywordRecord = supportSourceRecords.get(0);
        String keywordUrl = keywordRecord.getHttpRequest().getUrl();
        if (keywordUrl.matches(keysRegex)) {
            try {
                CrawlerRequestRecord indexRecord = supportSourceRecords.get(1);
                JSONObject jsonObject = JSONObject.parseObject(keywordRecord.getInternalDownloadPage().getRawText());
                HttpPage downloadPage = indexRecord.getInternalDownloadPage();
                // The index page links to news.yiche.com or news.bitauto.com;
                // the middle label tells us which domain is live right now.
                String indexUrl = downloadPage.getHtml().xpath("//li[@data-ctitle=\"yiche\"]/a/@href").get();
                nowDomain = indexUrl.split("\\.")[1];

                if (jsonObject.getIntValue("status") == 0) {
                    JSONArray objects = jsonObject.getJSONArray("content");
                    for (Object object : objects) {
                        // Previously the keyword was hard-coded to "quanxinaodia4l"
                        // and the loop broke after one element — debug leftovers
                        // that discarded all other keywords. Iterate them all.
                        String keyword = ((JSONObject) object).getString("keyword");
                        String homeUrl = String.format(seriesNewsListUrlFormat, nowDomain, keyword, 1);
                        CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                                .turnPageRequest(requestRecord)
                                .recordKey(homeUrl)
                                .httpUrl(homeUrl)
                                .releaseTime(System.currentTimeMillis())
                                .copyBizTags()
                                .copyScheduleTags()
                                .notFilterRecord()
                                .build();
                        crawlerRequestRecord.getHttpRequest().setMethod("GET");
                        allItemRecords.add(crawlerRequestRecord);
                    }
                }
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
        if (allItemRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        return allItemRecords;
    }

    /**
     * Routes a downloaded page to the matching parser based on its URL shape.
     * A page with a blank body or an unexpected HTTP status (anything other
     * than 200/404) is re-queued with washing disabled and the filter tag removed.
     *
     * @param crawlerRequestRecord the record whose page was downloaded
     * @param httpPage             the downloaded page
     * @return follow-up request records extracted from the page
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> links = new ArrayList<>();
        HttpRequest request = crawlerRequestRecord.getHttpRequest();
        String url = request.getUrl();
        int status = httpPage.getStatusCode();
        boolean unexpectedStatus = status != 200 && status != 404;
        if (StringUtils.isBlank(httpPage.getRawText()) || unexpectedStatus) {
            // Retry path: skip washing and drop the filter flag so the same
            // record is scheduled again.
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            links.add(crawlerRequestRecord);
            return links;
        }
        if (url.matches(indexRegex)) {
            return parseCategoryLinks(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(cateUrlRegex) || url.matches(listUrlRegex)) {
            return parseListLinks(crawlerRequestRecord, httpPage, links, url);
        }
        if (url.matches(seriesNewsListUrlRegex)) {
            return parseSeriesListLinks(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(articleUrlRegex)) {
            return parseItemLinks(crawlerRequestRecord, httpPage, links, request, url);
        }
        if (url.matches(commentUrlRegex)) {
            return parseCommentLinks(crawlerRequestRecord, httpPage, links, request);
        }
        if (url.matches(commentUrl)) {
            getTurnComment(crawlerRequestRecord, httpPage, links);
        }
        return links;
    }

    /**
     * Turn-page handler for the top_comments API: while the current response
     * still contains comment rows, builds, signs and queues the request for
     * the next comment page. Stops (and disables washing) when the payload is
     * empty or not of the expected shape.
     */
    private void getTurnComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        try{
            // $_type is the JsonPath root token (static import); .data.data holds the rows.
            List<String> list = httpPage.getJson().jsonPath($_type + ".data.data").all();
            if (list.size() == 0){
                return;
            }
        }catch (Exception e){
            // Payload not parseable as expected: treat as the last page.
            crawlerRequestRecord.setNeedWashPage(false);

            // Log message means "no next page".
            logger.error("没有下一页");
            return;
        }

        // currentPage / contentType / topicId were stored on the record's biz
        // tags by getCommentUrl (or by a previous turn of this method).
        KVTag currentPage = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("currentPage");
        Integer val = (Integer) currentPage.getVal();
        val = val + 1;
        String contentType = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("contentType");
        String contentId = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("topicId");
        String pageSize = "20";
        String hotFlag = "true";
        // URL-encoded param JSON:
        // {"contentType":<ct>,"contentId":<id>,"currentPage":<val>,"pageSize":20,"hotFlag":true}
        String url = "https://news.yiche.com/web_api/information_api/api/v1/comment/top_comments" +
                "?cid=508&param=%7B%22contentType%22%3A"+contentType+"%2C%22contentId%22%3A"+contentId+"%2C%22currentPage%22%3A"+val+"%2C%22pageSize%22%3A20%2C%22hotFlag%22%3Atrue%7D";

        // getSign returns "<sign>/<timestamp>" from the internal sign service.
        String signAndTime = this.getSign(contentType,contentId,val,pageSize,hotFlag);
        String[] split1 = signAndTime.split("/");
        String sign = split1[0];
        String time = split1[1];
        CrawlerRequestRecord likesRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .recordKey(url)
                .httpUrl(url)
                .needParsed(true)
                .needWashed(true)
                .resultLabelTag(comment)
                .copyBizTags()
                .copyRequestTags()
                .build();
        // The API validates these signing headers against the param payload.
        HttpRequest likesRequest = likesRecord.getHttpRequest();
        likesRequest.addHeader("x-platform","pc");
        likesRequest.addHeader("x-sign",sign);
        likesRequest.addHeader("x-timestamp",time);
        // Carry the paging state forward for the next turn.
        likesRecord.tagsCreator().bizTags().addCustomKV("contentType",contentType);
        likesRecord.tagsCreator().bizTags().addCustomKV("topicId",contentId);
        likesRecord.tagsCreator().bizTags().addCustomKV("currentPage",val);
        parsedLinks.add(likesRecord);
    }

    /**
     * Parses a car-series original-news list page
     * (https://car.&lt;domain&gt;.com/newcar/&lt;series&gt;/yuanchuang/&lt;page&gt;/):
     * queues the next list page while the current page is full (10 cards) and
     * one item request per article card.
     */
    private List<CrawlerRequestRecord> parseSeriesListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String listUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        // URL segments: [https:] [] [car.<domain>.com] [newcar] [<series>] [yuanchuang] [<page>]
        String[] segments = listUrl.split("/");
        String seriesKey = segments[4];
        int pageNo = Integer.parseInt(segments[6]);
        List<Selectable> cards = httpPage.getHtml().xpath("//div[@class=\"article-list clearfix\"]/div").nodes();
        if (null == cards) {
            return parsedLinks;
        }
        // A full page (10 cards) suggests there is another page after this one.
        if (cards.size() == 10) {
            String nextUrl = String.format(seriesNewsListUrlFormat, nowDomain, seriesKey, pageNo + 1);
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .recordKey(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build());
        }
        for (Selectable card : cards) {
            // Card links are protocol-relative; prepend the scheme.
            String itemUrl = "https:" + card.xpath("./div/h2/a/@href").get();
            String publishDate = card.xpath("./div/div/div/span[1]/text()").get();
            // "…/<id>.html" -> "<id>"
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];
            try {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(DateUtils.parseDate(publishDate, "yyyy-MM-dd").getTime())
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .copyScheduleTags()
                        .needWashed(false)
                        .build();
                Map<String, Object> extras = Maps.newHashMap();
                extras.put("articleKey", articleKey);
                extras.put("listUrl", listUrl);
                extras.put("itemUrl", itemUrl);
                itemRecord.getHttpRequest().setExtras(extras);
                parsedLinks.add(itemRecord);
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
        return parsedLinks;
    }

    /**
     * Parses the news home page navigation bar into one list request per
     * category, plus one fixed request for the Shanghai Auto Show channel
     * (which is not part of the nav bar).
     */
    private List<CrawlerRequestRecord> parseCategoryLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        for (Selectable navNode : httpPage.getHtml().xpath("//ul[@id=\"wenzhangNav\"]/li").nodes()) {
            String categoryUrl = navNode.xpath("./a/@href").get();
            if (!categoryUrl.startsWith("http")) {
                // Nav links may be protocol-relative.
                categoryUrl = "https:" + categoryUrl;
            }
            if (!isUrlMatch(categoryUrl, cateUrlRegex)) {
                continue;
            }
            String categoryName = navNode.xpath("./a/text()").get();
            CrawlerRequestRecord categoryRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(categoryUrl)
                    .recordKey(categoryUrl)
                    .releaseTime(System.currentTimeMillis())
                    .needParsed(true)
                    .needWashed(false)
                    .copyBizTags()
                    .copyScheduleTags()
                    .notFilterRecord()
                    .build();
            List<String> categoryPath = new ArrayList<>();
            categoryPath.add(categoryName);
            categoryRecord.tagsCreator().bizTags().addCustomKV(Field_Path, categoryPath);
            parsedLinks.add(categoryRecord);
        }
        // Explicitly queue the Shanghai Auto Show channel.
        String shanghaiShowUrl = "https://news.yiche.com/shcz/c1177/?pageindex=1";
        CrawlerRequestRecord shanghaiShowRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(shanghaiShowUrl)
                .recordKey(shanghaiShowUrl)
                .releaseTime(System.currentTimeMillis())
                .needParsed(true)
                .needWashed(false)
                .copyBizTags()
                .copyScheduleTags()
                .notFilterRecord()
                .build();
        List<String> shanghaiShowPath = new ArrayList<>();
        shanghaiShowPath.add("上海车展");
        shanghaiShowRecord.tagsCreator().bizTags().addCustomKV(Field_Path, shanghaiShowPath);
        parsedLinks.add(shanghaiShowRecord);
        return parsedLinks;
    }

    /**
     * Parses a category list page: queues the next list page (pageindex + 1)
     * and one item request per article card, tagging each item with its topic tags.
     *
     * @param lastRequestUrl URL of the page being parsed — either a bare
     *                       category URL or one already carrying ?pageindex=N
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, String lastRequestUrl) {
        String listUrl = "";
        if (lastRequestUrl.matches(cateUrlRegex)) {
            // First pagination step off a bare category page.
            listUrl = lastRequestUrl + "?pageindex=2";
        } else if (lastRequestUrl.matches(listUrlRegex)) {
            String[] parts = lastRequestUrl.split("=");
            int currentPage = Integer.parseInt(parts[1]);
            listUrl = parts[0] + "=" + (currentPage + 1);
        }
        // Only queue the turn-page request when a next URL was actually derived;
        // previously a record with an empty URL could be emitted if neither
        // regex matched.
        if (StringUtils.isNotBlank(listUrl)) {
            CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(listUrl)
                    .httpUrl(listUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyScheduleTags()
                    .build();
            parsedLinks.add(turnPageRequest);
        }

        List<Selectable> itemNodes = httpPage.getHtml().xpath("//div[@class=\"card-list-box clearfix\"]/div[@class=\"article-card horizon\"]").nodes();
        for (Selectable itemNode : itemNodes) {
            // Card links are protocol-relative; prepend the scheme.
            String itemUrl = "https:" + itemNode.xpath("./div/div/h2/a/@href").get();
            String pubTime = itemNode.xpath("./div/div/div[@class=\"info\"]/div[@class=\"last\"]/span[@class=\"time\"]/text()").get();
            List<String> articleTags = itemNode.xpath("./div/div/ul/li/a/text()").all();
            // "…/<id>.html" -> "<id>"
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];
            try {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd").getTime())
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .copyScheduleTags()
                        .needWashed(false)
                        .build();
                itemRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, articleTags);
                Map<String, Object> extras = Maps.newHashMap();
                extras.put("articleKey", articleKey);
                extras.put("listUrl", lastRequestUrl);
                extras.put("itemUrl", itemUrl);
                itemRecord.getHttpRequest().setExtras(extras);
                parsedLinks.add(itemRecord);
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
        return parsedLinks;
    }
    /**
     * Obtains the x-sign header value by POSTing the raw signing string to an
     * internal sign service, returning "&lt;sign&gt;/&lt;timestamp&gt;".
     * NOTE(review): the signing secret and the service address
     * (192.168.1.210:8899) are hard-coded — consider moving both to config.
     */
    private String getSign(String contentType, String contentId, Integer val, String pageSize, String hotFlag) {
        String time = String.valueOf(System.currentTimeMillis());
        // Un-encoded param JSON + site secret + timestamp, mirroring the site's JS.
        String s  = "cid=508&param={\"contentType\":"+contentType+",\"contentId\":"+contentId+",\"currentPage\":"+val+",\"pageSize\":"+pageSize+",\"hotFlag\":"+hotFlag+"}19DDD1FBDFF065D3A4DA777D2D7A81EC"+time;
        // Fresh downloader per call; the sign service is reached through the
        // same proxy/http config as the target site.
        HttpClientDownloader downloader = new HttpClientDownloader();
        downloader.setClientGenerator(new HttpClientGenerator());
        downloader.setProxyProvider(new SiteConfigProxyProvider());
        HttpConfig httpConfig = HttpConfig.me(nowDomain);
        HttpRequest httpRequest = new HttpRequest();
        httpRequest.setResponseCharset("UTF-8");
        httpRequest.setMethod("post");
        httpRequest.setUrl("http://192.168.1.210:8899/encrypt/sign");
        Map<String,Object> bodyMap = new HashMap<>();
        bodyMap.put("params",s);
        HttpRequestBody form = HttpRequestBody.form(bodyMap, "UTF-8");
        httpRequest.setRequestBody(form);
        HttpPage download = downloader.download(httpRequest, httpConfig);
        // Response body is the sign itself; "/" separates it from the timestamp
        // for the caller to split.
        String rawText = download.getRawText();
        return rawText+"/"+time;
    }
    /**
     * Parses an article detail page: pulls the news id and content type out of
     * the inline page script, tags the record with the article id/URL, and
     * queues the signed first-page top-comments request via getCommentUrl.
     * <p>
     * The large commented-out likes / views / legacy comment-list flows that
     * previously cluttered this method were dead code and have been removed
     * (see VCS history). {@code lastRequest} is retained in the signature for
     * caller compatibility although it is no longer read here.
     */
    private List<CrawlerRequestRecord> parseItemLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, HttpRequest lastRequest, String lastRequestUrl) {
        crawlerRequestRecord.setNeedWashPage(true);
        // Fallback topic id derived from the URL file name ("xx1234567.html" ->
        // "1234567"); overridden below by the newsId embedded in the page
        // script when present.
        String articleKey = lastRequestUrl.substring(lastRequestUrl.lastIndexOf("/") + 1).split("\\.")[0];
        String topicId = articleKey.substring(2);
        Matcher newsIdMatcher = Pattern.compile("newsId:\\s*'\\d*',").matcher(httpPage.getRawText());
        while (newsIdMatcher.find()) {
            topicId = newsIdMatcher.group(0).split("'")[1];
        }
        // contentType is embedded in the same inline script block.
        Matcher contentTypeMatcher = Pattern.compile("contentType:\\s*'\\d*'").matcher(httpPage.getRawText());
        String contentType = "";
        while (contentTypeMatcher.find()) {
            contentType = contentTypeMatcher.group(0).split("'")[1];
        }

        // First recommended car series on the page; passed through to
        // getCommentUrl (currently unused there).
        String seriesId = httpPage.getHtml().xpath("//ul[@class=\"news-recommend-car-wrap\"]/li[1]/a/@data-id").get();
        String contentId = crawlerRequestRecord.getHttpRequest().getUrl().substring(crawlerRequestRecord.getHttpRequest().getUrl().lastIndexOf("/") + 1).split("\\.")[0];
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV("contentId", contentId);
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV("ArticleUrl", crawlerRequestRecord.getHttpRequest().getUrl());
        this.getCommentUrl(crawlerRequestRecord, httpPage, parsedLinks, seriesId, topicId, contentType);
        return parsedLinks;
    }

    /**
     * Builds and signs the first-page top_comments request for an article and
     * appends it to parsedLinks; paging state is stashed on the record's biz
     * tags so getTurnComment can continue from page 2.
     * NOTE(review): seriesId and httpPage are currently unused here.
     */
    private void getCommentUrl(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, String seriesId, String topicId, String contentType) {
        int currentPage = 1;
        String pageSize = "20";
        String hotFlag = "true";
        // URL-encoded param JSON:
        // {"contentType":<ct>,"contentId":<topicId>,"currentPage":1,"pageSize":20,"hotFlag":true}
        String url = "https://news.yiche.com/web_api/information_api/api/v1/comment/top_comments" +
                "?cid=508&param=%7B%22contentType%22%3A"+contentType+"%2C%22contentId%22%3A"+topicId+"%2C%22currentPage%22%3A"+currentPage+"%2C%22pageSize%22%3A20%2C%22hotFlag%22%3Atrue%7D";
        // getSign returns "<sign>/<timestamp>" from the internal sign service.
        String signAndTime = this.getSign(contentType, topicId, currentPage, pageSize, hotFlag);
        String[] split1 = signAndTime.split("/");
        String sign = split1[0];
        String time = split1[1];
        CrawlerRequestRecord likesRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .recordKey(url)
                .httpUrl(url)
                .needParsed(true)
                .needWashed(true)
                .copyBizTags()
                .copyResultTags()
                .resultLabelTag(comment)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .build();
        // The API validates these signing headers against the param payload.
        HttpRequest likesRequest = likesRecord.getHttpRequest();
        likesRequest.addHeader("User-Agent",getRandomUA());
        likesRequest.addHeader("x-platform","pc");
        likesRequest.addHeader("x-sign",sign);
        likesRequest.addHeader("x-timestamp",time);

        // Paging state consumed by getTurnComment.
        likesRecord.tagsCreator().bizTags().addCustomKV("currentPage",currentPage);
        likesRecord.tagsCreator().bizTags().addCustomKV("contentType",contentType);
        likesRecord.tagsCreator().bizTags().addCustomKV("topicId",topicId);
        parsedLinks.add(likesRecord);
    }

    /**
     * Handles a legacy comment-API response: while more comments remain
     * (currentNum * size &lt; total) and the current page is non-empty, queues
     * the request for the next comment page.
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, HttpRequest lastRequest) {
        JSONObject body = JSONObject.parseObject(httpPage.getRawText());
        JSONObject result = body.getJSONObject("result");
        int pageSize = result.getIntValue("size");
        int total = result.getIntValue("total");
        int rowCount = result.getJSONArray("list").size();
        Map<String, Object> extras = lastRequest.getExtras();
        int currentNum = (int) extras.get("currentNum");

        boolean responseOk = body.getIntValue("code") == 0 && "OK".equalsIgnoreCase(body.getString("msg"));
        boolean hasMorePages = (currentNum * pageSize) < total;
        if (responseOk && hasMorePages && rowCount > 0) {
            crawlerRequestRecord.setNeedWashPage(true);
            String topicId = (String) extras.get("topicId");
            String productId = (String) extras.get("productId");
            String nextCommentUrl = String.format(commentUrlFormat, productId, topicId, currentNum + 1, 50, System.currentTimeMillis());
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextCommentUrl)
                    .httpUrl(nextCommentUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .resultLabelTag(comment)
                    .resultLabelTag(interaction)
                    .needWashed(true)
                    .copyBizTags()
                    .copyScheduleTags()
                    .notFilterRecord()
                    .build();
            // Copy extras so the original record's paging state is untouched.
            Map<String, Object> commentExtras = copyExtras(extras);
            commentExtras.put("currentNum", currentNum + 1);
            commentRecord.getHttpRequest().setExtras(commentExtras);
            parsedLinks.add(commentRecord);
        }
        return parsedLinks;
    }

    /**
     * Computes the x-sign header for a support/comment API URL as
     * md5("cid=&lt;cid&gt;&amp;param=&lt;decoded param&gt;&lt;secret&gt;&lt;timestamp&gt;").
     *
     * @param url       request URL carrying cid and an URL-encoded param query value
     * @param timestamp millisecond timestamp that is also sent as x-timestamp
     * @return the md5 hex sign, or a random UUID-derived string when the URL
     *         has no parameters or param decoding fails
     */
    private String generatorSignFromUrl(String url, String timestamp){
        Map<String, Object> urlParams = getUrlParams(url);
        try {
            if (null != urlParams){
                // Site-embedded signing secret (same value used in getSign()).
                String u = "19DDD1FBDFF065D3A4DA777D2D7A81EC";
                String cid = (String) urlParams.get("cid");
                String param = URLDecoder.decode((String) urlParams.get("param"),"utf-8");
                String s = "cid=" + cid + "&param=" + param + u + timestamp;
                return DigestUtils.md5DigestAsHex(s.getBytes());
            }
        } catch (UnsupportedEncodingException e) {
            // Fixed: was logger.error(e.getMessage(), "url decode error") —
            // inverted SLF4J arguments that dropped the stack trace entirely.
            logger.error("url decode error", e);
        }
        return UUID.randomUUID().toString().replaceAll("-","");
    }

//    @Test
//    public void testSign() throws Exception {
//        String url = "https://news.yiche.com/web_api/information_api/api/v1/support/support_info?cid=508&param=%7B%22contentId%22%3A%2213368116%22%2C%22contentType%22%3A%2220%22%7D";
//        Map<String, Object> urlParams = getUrlParams(url);
//        String cid = (String) urlParams.get("cid");
//        String u = "19DDD1FBDFF065D3A4DA777D2D7A81EC";
//        String param = URLDecoder.decode((String) urlParams.get("param"),"utf-8");
//        String date = String.valueOf(new Date().getTime());
//        String s = "cid=" + cid + "&param=" + param + u + date;
//        String sign = DigestUtils.md5DigestAsHex(s.getBytes());
//        System.out.println(date);
//        System.out.println(sign);
//        System.out.println(UUID.randomUUID().toString());
//    }

    /**
     * Copies the likes/views counters fetched by the internal (likes/views API)
     * downloads into the article request's extras, so washInteraction can read
     * them later. Only article-detail requests are processed.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        HttpRequest httpRequest = crawlerRecord.getHttpRequest();
        if (!httpRequest.getUrl().matches(articleUrlRegex)){
            return;
        }
        Map<String, Object> extras = httpRequest.getExtras();
        for (CrawlerRequestRecord internalRecord : internalDownloadRecords) {
            String internalUrl = internalRecord.getHttpRequest().getUrl();
            if (internalUrl.matches(likesUrlRegex)){
                JSONObject body = JSONObject.parseObject(internalRecord.getInternalDownloadPage().getRawText());
                boolean ok = body.getIntValue("status") == 1 && "success".equalsIgnoreCase(body.getString("message"));
                if (ok){
                    try {
                        String supportCount = body.getJSONObject("data").getString("supportCount");
                        extras.put("likes", supportCount);
                    } catch (Exception e) {
                        // malformed payload — fall back to zero likes
                        extras.put("likes", "0");
                    }
                }
            }
            if (internalUrl.matches(viewsUrlRegex)){
                JSONObject body = JSONObject.parseObject(internalRecord.getInternalDownloadPage().getRawText());
                boolean ok = body.getIntValue("status") == 1 && "ok".equalsIgnoreCase(body.getString("message"));
                if (ok){
                    JSONArray data = body.getJSONArray("data");
                    if (data.size() > 0){
                        extras.put("views", data.getJSONObject(0).getString("num"));
                    } else {
                        extras.put("views", "0");
                    }
                }
            }
        }
    }

    /**
     * Dispatches the downloaded page to the matching wash routine based on the
     * record's result data types and the request url pattern.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> results = new ArrayList<>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        boolean wantsArticle = crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article);
        if (wantsArticle && requestUrl.matches(articleUrlRegex)){
            results.addAll(washArticle(crawlerRequestRecord, httpPage));
        }

        boolean wantsInteraction = crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction);
        if (wantsInteraction){
            results.addAll(washInteraction(crawlerRequestRecord, httpPage));
        }

        boolean wantsComment = crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment);
        if (wantsComment){
            if (requestUrl.matches(likesUrlRegex)){
                results.addAll(washComment(crawlerRequestRecord, httpPage));
            }
            if (requestUrl.matches(commentUrl)){
                this.washNewComment(crawlerRequestRecord, httpPage, results);
            }
        }

        return results;
    }

    /**
     * Parses the new-style comment-list JSON and appends comment CrawlerData
     * items — top-level comments plus their full replies — to the output list.
     * Items outside the record's configured date range are skipped.
     *
     * @param crawlerRequestRecord current request record (supplies tags and filters)
     * @param httpPage             downloaded comment-list JSON page
     * @param crawlerDataList      output list the parsed comments are added to
     */
    private void washNewComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerData> crawlerDataList) {
        List<String> list = httpPage.getJson().jsonPath($_type + ".data.data").all();
        String contentId = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("contentId");
        String articleUrl = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("ArticleUrl");
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        for (String str : list){
            JSONObject commentObj = JSONObject.parseObject(str);
            String content = commentObj.getString("content");
            String createTime = commentObj.getString("createTime");
            String id = commentObj.getString("id");
            String userId = commentObj.getString("userId");
            String showName = commentObj.getString("showName");
            long time = 0;
            try {
                time = DateUtils.parseDate(createTime, "yyyy-MM-dd HH:mm:ss").getTime();
            } catch (ParseException e) {
                // time stays 0 so the comment is dropped by the date-range filter below
                logger.warn("parse comment createTime error: {}", createTime, e);
            }
            if (!isDateRange(crawlerRequestRecord, time)){
                continue;
            }
            JSONArray commentFullReplies = commentObj.getJSONArray("commentFullReplies");
            if (commentFullReplies != null && commentFullReplies.size() > 0){
                for (Object obj : commentFullReplies){
                    JSONObject replyObj = JSONObject.parseObject(String.valueOf(obj));
                    String replyContent = replyObj.getString("content");
                    String replyCreateTime = replyObj.getString("createTime");
                    String replyUserId = replyObj.getString("userId");
                    String replyUsername = replyObj.getString("username");
                    String replyId = replyObj.getString("id");
                    long replyTime = 0;
                    if (replyCreateTime != null){
                        try {
                            // BUGFIX: previously parsed the parent's createTime instead of the reply's
                            replyTime = DateUtils.parseDate(replyCreateTime, "yyyy-MM-dd HH:mm:ss").getTime();
                        } catch (ParseException e) {
                            logger.warn("parse reply createTime error: {}", replyCreateTime, e);
                        }
                    } else {
                        replyTime = System.currentTimeMillis();
                    }
                    // BUGFIX: filter on the reply's own timestamp, not the parent's
                    if (!isDateRange(crawlerRequestRecord, replyTime)){
                        continue;
                    }
                    CrawlerData replyData = CrawlerData.builder()
                            .data(crawlerRequestRecord,httpPage)
                            .url(articleUrl)
                            .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), replyId))
                            .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain() ,site, article.enumVal(), contentId))
                            .releaseTime(replyTime)
                            .addContentKV(Field_Content,replyContent)
                            .addContentKV(Field_Author,replyUsername)
                            .addContentKV(Field_Author_Id,replyUserId)
                            .resultLabelTag(comment)
                            .build();
                    replyData.tagsCreator().bizTags().addSiteBiz("news");
                    crawlerDataList.add(replyData);
                }
            }
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(articleUrl)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), id))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain() ,site, article.enumVal(), contentId))
                    .releaseTime(time)
                    .addContentKV(Field_Content,content)
                    .addContentKV(Field_Author,showName)
                    .addContentKV(Field_Author_Id,userId)
                    .resultLabelTag(comment)
                    .build();
            crawlerData.tagsCreator().bizTags().addSiteBiz("news");
            crawlerDataList.add(crawlerData);
        }

    }
    /**
     * Checks whether the given release time falls inside the record's
     * configured date-range filter. Records whose filter is neither
     * keyOrDateRange nor dateRange always pass.
     *
     * @param crawlerRequestRecord record carrying the filter configuration
     * @param releaseTimeToLong    release time in epoch millis (0 = unknown, never passes a range)
     * @return true when the time is inside the allowed range, or no range applies
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord,Long releaseTimeToLong){
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        boolean dateFiltered = filter == CrawlerEnum.CrawlerRecordFilter.keyOrDateRange
                || filter == CrawlerEnum.CrawlerRecordFilter.dateRange;
        if (!dateFiltered){
            return true;
        }
        Long startTime = null;
        Long endTime = null;
        for (FilterInfo filterInfo : crawlerRequestRecord.getFilterInfos()) {
            if (filterInfo.getFilter() != CrawlerEnum.CrawlerRecordFilter.dateRange){
                continue;
            }
            long[] dateAllowRange = filterInfo.getDateAllowRange();
            int hourFromNow = filterInfo.getHourFromNow();
            if (dateAllowRange != null){
                startTime = dateAllowRange[0];
                endTime = dateAllowRange[1];
            } else if (hourFromNow != 0){
                // anchor one minute behind wall-clock, then look back hourFromNow hours
                endTime = System.currentTimeMillis() - 60000;
                startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
            }
        }
        return startTime != null && releaseTimeToLong != 0
                && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime;
    }
    /**
     * Extracts an article record (title, author, body text, images, publish
     * time, recommended car series) from an article-detail HTML page.
     *
     * @param crawlerRequestRecord current request record
     * @param httpPage             downloaded article page
     * @return a single-element list with the article CrawlerData
     */
    private List<CrawlerData> washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        Html pageHtml = httpPage.getHtml();
        String title = pageHtml.xpath("//h1[@class=\"news-detail-header\"]/text()").get();
        title = unescapeHtml2J(title);
        String author = pageHtml.xpath("//a[@class=\"news-detail-profile-active\"]/text()").get();
        String authorUrl = pageHtml.xpath("//a[@class=\"news-detail-profile-active\"]/@href").get();
        // BUGFIX: guard the split — the xpath may return null or a url without "/u",
        // which previously threw NPE/ArrayIndexOutOfBoundsException.
        String authorId = "";
        if (StringUtils.isNotBlank(authorUrl)){
            String[] authorUrlParts = authorUrl.split("/u");
            if (authorUrlParts.length > 1){
                authorId = authorUrlParts[1].replace("/","");
            }
        }
        List<String> allContents = pageHtml.xpath("//div[@class=\"news-detail-main motu_cont\"]//p//text()").all();
        // strip entities, newlines and smart-quote remnants, then concatenate paragraphs
        StringBuilder content = new StringBuilder();
        for (String allContent : allContents) {
            allContent = allContent.replaceAll("&\\S*?;","").replaceAll("\n","").replaceAll("\\wdquo;","").trim();
            content.append(allContent);
        }

        List<String> allImages = pageHtml.xpath("//div[@class=\"news-detail-main motu_cont\"]//p/img/@data-original").all();
        // image urls joined with the downstream field separator "\x01"
        StringBuilder imageUrl = new StringBuilder();
        for (String allImage : allImages) {
            imageUrl.append(allImage).append("\\x01");
        }
        // publish time lives in an inline JS assignment, e.g. publishTime: '2021-01-01 10:00:00',
        String publishTime = "";
        Matcher matcher = Pattern.compile("publishTime:\\s*'\\S*\\s*\\S*',").matcher(httpPage.getRawText());
        while (matcher.find()){
            publishTime = matcher.group(0).split("'")[1];
        }
        String contentId = httpRequest.getUrl().substring(httpRequest.getUrl().lastIndexOf("/") + 1).split("\\.")[0];
        long releaseTime = 0;
        try {
            releaseTime = StringUtils.isBlank(publishTime) ? crawlerRequestRecord.getReleaseTime() : DateUtils.parseDate(publishTime,"yyyy-MM-dd HH:mm:ss").getTime();
        } catch (ParseException e) {
            // BUGFIX: arguments were reversed (exception message used as the SLF4J format string)
            logger.warn("parse date error: {}", publishTime, e);
            releaseTime = crawlerRequestRecord.getReleaseTime();
        }
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord,httpPage)
                .url(httpRequest.getUrl())
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), contentId))
                .releaseTime(releaseTime)
                .addContentKV(Field_Title,title)
                .addContentKV(Field_Content,unescapeHtml2J(content.toString()))
                .addContentKV(Field_Author,author)
                .addContentKV(Field_Author_Id,authorId)
                .addContentKV(Field_Urls, (String) extras.get("listUrl"))
                .addContentKV(Field_Images,imageUrl.toString())
                .resultLabelTag(article)
                .build();
        crawlerData.tagsCreator().bizTags().addSiteBiz("news");

        // recommended car-series side panel (optional)
        List<Map<String,String>> seriesList = new ArrayList<>();
        List<Selectable> seriesNodes = pageHtml.xpath("//ul[@class=\"news-recommend-car-wrap\"]/li").nodes();
        if (null != seriesNodes && seriesNodes.size() > 0){
            for (Selectable seriesNode : seriesNodes) {
                String seriesId = seriesNode.xpath("./a[@target]/@data-id").get();
                String seriesUrl = seriesNode.xpath("./a[@target]/@href").get();
                String seriesName = seriesNode.xpath("./h3/a/text()").get();
                Map<String,String> seriesInfo = new HashMap<>();
                seriesInfo.put("series_name",seriesName);
                seriesInfo.put("series_url",seriesUrl);
                seriesInfo.put("series_id",seriesId);
                seriesList.add(seriesInfo);
            }
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Series,seriesList);
        }
        crawlerDataList.add(crawlerData);
        return crawlerDataList;
    }

    /**
     * Builds interaction (comments/likes/views counters) records. Handles two
     * request shapes: article-detail pages (counters gathered in extras by
     * afterInternalDownload) and comment-list API responses (per-comment likes).
     *
     * @param crawlerRequestRecord current request record
     * @param httpPage             downloaded page (HTML or JSON depending on url)
     * @return interaction CrawlerData items parsed from the page
     */
    private List<CrawlerData> washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = httpRequest.getUrl();
        Map<String, Object> extras = httpRequest.getExtras();
        Html pageHtml = httpPage.getHtml();
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (lastRequestUrl.matches(articleUrlRegex)){
            String contentId = httpRequest.getUrl().substring(httpRequest.getUrl().lastIndexOf("/") + 1).split("\\.")[0];
            String comments = pageHtml.xpath("//li[@class=\"news-detail-position-pinglun\"]/a/text()").get();
            String likes = (String) extras.get("likes");
            String views = (String) extras.get("views");
            long releaseTime = 0;
            // publish time lives in an inline JS assignment on the article page
            String publishTime = "";
            Matcher matcher = Pattern.compile("publishTime:\\s*'\\S*\\s*\\S*',").matcher(httpPage.getRawText());
            while (matcher.find()){
                publishTime = matcher.group(0).split("'")[1];
            }
            try {
                releaseTime = StringUtils.isBlank(publishTime) ? crawlerRequestRecord.getReleaseTime() : DateUtils.parseDate(publishTime,"yyyy-MM-dd HH:mm:ss").getTime();
            } catch (ParseException e) {
                // BUGFIX: arguments were reversed (exception message used as the SLF4J format string)
                logger.warn("parse date error: {}", publishTime, e);
                releaseTime = crawlerRequestRecord.getReleaseTime();
            }
            try {
                // validation only: NPE (null xpath result) or NumberFormatException
                // both mean the counter is unusable, so fall back to "0"
                Integer.parseInt(comments.trim());
            } catch (Exception e){
                comments = "0";
            }
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(httpRequest.getUrl())
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), contentId))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), contentId))
                    .releaseTime(releaseTime)
                    .addContentKV(Field_I_Comments,comments)
                    .addContentKV(Field_I_Likes,likes)
                    .addContentKV(Field_I_Views,views)
                    .resultLabelTag(interaction)
                    .build();
            crawlerData.tagsCreator().bizTags().addSiteBiz("news");
            crawlerDataList.add(crawlerData);
        }
        if (lastRequestUrl.matches(commentUrlRegex)){
            // parse the comment-list JSON: one interaction record per comment's likeCount
            JSONObject resultObj = JSONObject.parseObject(httpPage.getRawText()).getJSONObject("result");
            JSONArray commentsList = resultObj.getJSONArray("list");
            for (Object commentItem : commentsList) {
                JSONObject commentObj = (JSONObject)commentItem;
                String contentId = commentObj.getString("id");
                String likes = commentObj.getString("likeCount");
                String publishTime = commentObj.getString("createTime");
                long releaseTime = 0;
                try {
                    releaseTime = StringUtils.isBlank(publishTime) ? crawlerRequestRecord.getReleaseTime() : DateUtils.parseDate(publishTime,"yyyy-MM-dd HH:mm:ss").getTime();
                } catch (ParseException e) {
                    // BUGFIX: arguments were reversed (exception message used as the SLF4J format string)
                    logger.warn("parse date error: {}", publishTime, e);
                    releaseTime = crawlerRequestRecord.getReleaseTime();
                }
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord,httpPage)
                        .url((String) extras.get("itemUrl"))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), contentId))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), contentId))
                        .releaseTime(releaseTime)
                        .addContentKV(Field_I_Likes,likes)
                        .resultLabelTag(interaction)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .build();
                crawlerData.tagsCreator().bizTags().addSiteBiz("news");
                crawlerDataList.add(crawlerData);
            }
        }

        return crawlerDataList;
    }

    /**
     * Parses the legacy comment-list JSON response into comment CrawlerData
     * items, each parented to the article identified by extras["articleKey"].
     *
     * @param crawlerRequestRecord current request record
     * @param httpPage             downloaded comment-list JSON page
     * @return comment CrawlerData items parsed from the page
     */
    private List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String articleKey = (String) extras.get("articleKey");
        // parse the comment list out of the JSON envelope
        JSONObject resultObj = JSONObject.parseObject(httpPage.getRawText()).getJSONObject("result");
        JSONArray commentsList = resultObj.getJSONArray("list");
        for (Object commentItem : commentsList) {
            JSONObject commentObj = (JSONObject)commentItem;
            String contentId = commentObj.getString("id");
            String content = commentObj.getString("content");
            String author = commentObj.getString("showName");
            String authorId = commentObj.getString("userId");
            String floor = commentObj.getString("floor");

            String publishTime = commentObj.getString("createTime");
            long releaseTime = 0;
            try {
                releaseTime = StringUtils.isBlank(publishTime) ? crawlerRequestRecord.getReleaseTime() : DateUtils.parseDate(publishTime,"yyyy-MM-dd HH:mm:ss").getTime();
            } catch (ParseException e) {
                // BUGFIX: arguments were reversed (exception message used as the SLF4J format string)
                logger.warn("parse date error: {}", publishTime, e);
                releaseTime = crawlerRequestRecord.getReleaseTime();
            }
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(httpRequest.getUrl())
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), contentId))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .releaseTime(releaseTime)
                    .addContentKV(Field_Content,content)
                    .addContentKV(Field_Author,author)
                    .addContentKV(Field_Author_Id,authorId)
                    .addContentKV(Field_Floor,floor)
                    .resultLabelTag(comment)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .build();
            crawlerData.tagsCreator().bizTags().addSiteBiz("news");
            crawlerDataList.add(crawlerData);
        }
        return crawlerDataList;
    }

    /**
     * Post-execution hook required by the script interface; this script needs
     * no per-run cleanup, so the body is intentionally empty.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * 将url参数转换成map
     * @param url http://*.*.com?aa=11&bb=22&cc=33
     * @return map
     */
    /**
     * Splits a url's query string into a parameter map. Values are NOT
     * url-decoded; callers decode where needed (see generatorSignFromUrl).
     *
     * @param url e.g. http://*.*.com?aa=11&amp;bb=22&amp;cc=33
     * @return parameter map, or null when the url carries no query string
     *         (callers rely on the null contract — do not change to empty map)
     */
    private Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<String, Object>(0);
        // BUGFIX: guard the index — a url ending in "?" made split() return a
        // single-element array and the old [1] access threw AIOOBE
        String[] urlParts = url.split("\\?");
        String param = urlParts.length > 1 ? urlParts[1] : null;
        if (StringUtils.isBlank(param)) {
            return null;
        }
        String[] params = param.split("&");
        for (String s : params) {
            // limit 2 keeps '=' characters inside values intact
            String[] p = s.split("=", 2);
            if (p.length == 2) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

    // Pool of desktop browser user-agent strings rotated per request by
    // getRandomUA() to reduce the chance of UA-based blocking.
    private static List<String> agentList = new ArrayList<>();

    static {
        agentList.add("Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.1");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/536.6");
        agentList.add("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/536.6");
        agentList.add("Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.1");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)");
        agentList.add("Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.28.3 (KHTML, like Gecko) Version/3.2.3 ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/525.28.3");
        agentList.add("Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16");
        agentList.add("Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14");
        agentList.add("Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14");
        agentList.add("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14");
        agentList.add("Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02");
        agentList.add("Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0");
        agentList.add("Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.13 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400");
    }

    /**
     * Picks a random user-agent string from the pool.
     *
     * @return a randomly selected UA header value
     */
    private static String getRandomUA(){
        // BUGFIX: RandomUtils.nextInt's upper bound is exclusive, so passing
        // size() - 1 made the last entry unreachable
        return agentList.get(RandomUtils.nextInt(0, agentList.size()));
    }

    /**
     * Returns a shallow copy of the given extras map, so per-link mutation
     * (e.g. bumping "currentNum") does not leak into the shared request extras.
     *
     * @param inExtras source map (must not be null)
     * @return a new, independent HashMap containing the same entries
     */
    public static Map<String, Object> copyExtras(Map<String,Object> inExtras){
        // HashMap's copy constructor replaces the manual entry-by-entry loop
        return new HashMap<>(inExtras);
    }

    /**
     * Repeatedly HTML-unescapes a string to undo multiple rounds of entity
     * encoding (e.g. "&amp;amp;quot;" → "\""), capped at 6 passes.
     *
     * @param str possibly multi-escaped html text; null is tolerated
     * @return the fully unescaped text, or null when str is null
     */
    public static String unescapeHtml2J(String str){
        // BUGFIX: callers pass xpath results that can be null; previously this NPEd
        if (str == null){
            return null;
        }
        int times = 0;
        while (str.contains("&") && str.contains(";")){
            String unescaped = StringEscapeUtils.unescapeHtml(str);
            if (unescaped.equals(str)){
                // fixed point reached — avoid spinning until the pass cap
                break;
            }
            str = unescaped;
            times ++;
            if (times > 5){
                break;
            }
        }
        return str;
    }
}
