package com.chance.cc.crawler.development.scripts.xcar.news;

import com.alibaba.fastjson.JSON;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.meta.core.bean.common.MetaResponse;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainKeys;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateFormatUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.message.BasicNameValuePair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Series;

/**
 * @author bx
 * @date 2021/1/8 0008 12:11
 */
public class XCarNewsCrawlerScript extends CrawlerCommonScript {

    private static final Logger logger = LoggerFactory.getLogger(XCarNewsCrawlerScript.class);

    // Business tags: the crawl site and the two site-biz categories this script emits.
    public static final String site= "news";
    public static final String news_site_biz= "news";
    public static final String video_site_biz= "video";

    // URL patterns for each xcar.com.cn sub-site handled by this script.
    public static final String infoXCarRegular = "https://info.xcar.com.cn\\S*";//new-car news
    public static final String suvXCarRegular = "https://suv.xcar.com.cn\\S*";//SUV
//    public static final String tagsXCarRegular = "https://tags.xcar.com.cn/api/detail\\S*";//SUV model buying guide (disabled)
////    public static final String tagsXCarArticleUrlFormat = "https://tags.xcar.com.cn/api/detail?tongji=tags-airticle&tag_id=1000014&type=1&page=%d&limit=20&uid=0";//SUV model buying guide (disabled)
////    public static final String tagsXCarVideoUrlFormat = "https://tags.xcar.com.cn/api/detail?tongji=tags-video&tag_id=1000014&type=2&page=%d&limit=20&uid=0";//SUV model buying guide (disabled)
    public static final String greenXCarRegular = "https://green.xcar.com.cn\\S*";//new energy (EV)
    public static final String newCarXCarRegular = "https://newcar.xcar.com.cn/list\\S*";//buying guide list
    public static final String newCarArticleXCarRegular = "https://newcar.xcar.com.cn/\\d*/\\S*.html\\S*";//buying guide article
    public static final String driveXCarRegular = "https://drive.xcar.com.cn\\S*";//test drive
    public static final String xtvXCarRegular = "https://xtv.xcar.com.cn\\S*";//XCar video (xtv)
    public static final String newsArticleCommentUrlRegular = "https://comment.xcar.com.cn/interface/index.php\\S*";//comment API

    // Comment-list API URL template: %s=cid, %s=ctype, %d=page, %s=timestamp cache-buster.
    public static final String newsArticleCommentUrlFormat = "https://comment.xcar.com.cn/interface/index.php?" +
            "iact=CommentLevel&cid=%s&action=getNewsComment&sort=time&ctype=%s&page=%d&limit=25&_=%s";

    // Human-viewable comment page URL used as the "url" field on washed comment data.
    public static final String newsArticleCommentWebUrl = "https://comment.xcar.com.cn/comment.php?nid=%s&ctype=%s";

    /**
     * Builds the initial crawl requests from the meta service's keyword
     * configuration. When no support-source records are supplied, defers to
     * the framework's default preparation.
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        if (supportSourceRecords == null || supportSourceRecords.size() < 1) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }

        List<CrawlerRecord> records = new ArrayList<>();

        // Find the support record whose URL is the meta "keys" endpoint for this domain.
        String keywordUrlSuffix = "/v1/meta/" + domain() + "/keys?site=news";
        CrawlerRequestRecord keywordRecord = null;
        for (CrawlerRequestRecord candidate : supportSourceRecords) {
            if (candidate.getHttpRequest().getUrl().endsWith(keywordUrlSuffix)) {
                keywordRecord = candidate;
                break;
            }
        }

        if (keywordRecord == null) {
            logger.error("xcar news start urls cant be empty!");
        } else {
            // cookies/userAgents must be initialized before records are created
            initAllCrawlerRecordByKeyword(requestRecord, keywordRecord, records);
        }
        return records;
    }

    /**
     * Dispatches a downloaded page to the section-specific link parser.
     * A failed download is re-enqueued for retry instead of being parsed;
     * otherwise the first URL pattern that matches wins.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> records = new ArrayList<>();

        if (!page.isDownloadSuccess()) {
            logger.error("xcar news download proxy has error ,will retry");
            addCrawlerRecords(records, crawlerRecord);
            // Download failed: there is no page content worth washing.
            crawlerRecord.setNeedWashPage(false);
            return records;
        }

        String url = page.getRequest().getUrl();
        if (isUrlMatch(url, infoXCarRegular)) {
            infoXCarParseLinks(records, crawlerRecord, page);
        } else if (isUrlMatch(url, suvXCarRegular)) {
            suvXCarParseLinks(records, crawlerRecord, page);
        } else if (isUrlMatch(url, greenXCarRegular)) {
            greenXCarParseLinks(records, crawlerRecord, page);
        } else if (isUrlMatch(url, newCarXCarRegular)) {
            newCarXCarParseLinks(records, crawlerRecord, page);
        } else if (isUrlMatch(url, driveXCarRegular)) {
            driveXCarParseLinks(records, crawlerRecord, page);
        } else if (isUrlMatch(url, xtvXCarRegular)) {
            xtvXCarParseLinks(records, crawlerRecord, page);
        } else if (isUrlMatch(url, newsArticleCommentUrlRegular)) {
            newsArticleCommentParseLinks(records, crawlerRecord, page);
        }
        return records;
    }

    /**
     * Dispatches a downloaded page to the section-specific wash routine based
     * on which URL pattern the page matches; first match wins.
     *
     * @return the washed CrawlerData items (empty when no pattern matched)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDatas = new ArrayList<>();
        // Hoisted: the original re-evaluated page.getRequest().getUrl() in every branch.
        String currentUrl = page.getRequest().getUrl();
        if (isUrlMatch(currentUrl, infoXCarRegular)) {
            infoXCarArticleWashPage(crawlerDatas, crawlerRecord, page);
        } else if (isUrlMatch(currentUrl, suvXCarRegular)) {
            suvXCarArticleWashPage(crawlerDatas, crawlerRecord, page);
        } else if (isUrlMatch(currentUrl, greenXCarRegular)) {
            greenXCarArticleWashPage(crawlerDatas, crawlerRecord, page);
        } else if (isUrlMatch(currentUrl, newCarXCarRegular)) {
            newCarXCarArticleWashPage(crawlerDatas, crawlerRecord, page);
        } else if (isUrlMatch(currentUrl, driveXCarRegular)) {
            driveXCarArticleWashPage(crawlerDatas, crawlerRecord, page);
        } else if (isUrlMatch(currentUrl, xtvXCarRegular)) {
            xtvXCarArticleWashPage(crawlerDatas, crawlerRecord, page);
        } else if (isUrlMatch(currentUrl, newsArticleCommentUrlRegular)) {
            commentCommonWashPage(crawlerDatas, crawlerRecord, page);
        }
        return crawlerDatas;
    }

    /**
     * Registers every URL pattern this script can handle with the framework.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {
                infoXCarRegular,
                suvXCarRegular,
                greenXCarRegular,
                newCarXCarRegular,
                newCarArticleXCarRegular,
                driveXCarRegular,
                xtvXCarRegular,
                newsArticleCommentUrlRegular,
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Accepts a record when its "site" business tag equals this script's site
     * ("news") or when no site tag is set at all.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        String siteTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (StringUtils.isEmpty(siteTag)) {
            return true;
        }
        return site.equals(siteTag);
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // Intentionally empty: this script needs no post-execution cleanup.
    }

    /** @return the crawler domain identifier for xcar.com.cn. */
    @Override
    public String domain() {
        return "xcar";
    }

    /**
     * Re-enqueues {@code crawlerRecord} as a turn-page retry request, tracking
     * the attempt count in the "download_retry_count" business tag and giving
     * up once the stored count reaches 20.
     * Note: the stored count starts at 2 on the first retry (1 + increment).
     */
    private void addCrawlerRecords(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord){

        int count = 1;
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        if (crawlerBusinessTags.hasKVTag("download_retry_count")){
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag("download_retry_count").getVal();
            if (count >= 20){
                // Bug fix: the original message had two placeholders but only one argument,
                // so the second placeholder was never filled.
                logger.error("xcar news download the number of retries exceeds the limit" +
                        ",request url {}",crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        count++;
        crawlerBusinessTags.addCustomKV("download_retry_count",count);

        // Rebuild the request as a turn-page record so the scheduler retries it.
        CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRecord)
                .httpRequest(crawlerRecord.getHttpRequest())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .build();
        // Retries must not be deduplicated away by the filter.
        crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Converts each keyword entry returned by the meta "keys" endpoint into a
     * start crawl request and appends it to {@code allItemRecords}.
     * Any parse failure is logged (with stack trace) and aborts the whole batch.
     */
    private void initAllCrawlerRecordByKeyword(CrawlerRequestRecord requestRecord,
                                               CrawlerRequestRecord supportSourceRecord,
                                               List<CrawlerRecord> allItemRecords){
        try {
            HttpPage httpPage = supportSourceRecord.getInternalDownloadPage();
            MetaResponse metaResponse = JSON.parseObject(httpPage.getRawText(), MetaResponse.class);
            // status == 0 appears to mean success -- TODO confirm against MetaResponse contract
            if (metaResponse.getStatus() == 0 && metaResponse.getContent() != null){
                List<String> contents = (List<String>) metaResponse.getContent();
                for (String content : contents) {
                    CrawlerDomainKeys crawlerDomainKeys = JSON.parseObject(content, CrawlerDomainKeys.class);
                    // Each keyword is itself a JSON document carrying the start URL and its menu path.
                    String keyword = crawlerDomainKeys.getKeyword();
                    Json keywordJson = new Json(keyword);
                    String httpUrl = keywordJson.jsonPath($_type + ".keyword_url").get();
                    String firstLevelMenu = keywordJson.jsonPath($_type + ".first_level_menu").get();
                    String secondLevelMenu = keywordJson.jsonPath($_type + ".second_level_menu").get();
                    CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(requestRecord)
                            .httpUrl(httpUrl)
                            .httpHeads(requestRecord.getHttpRequest().getHeaders())
                            .releaseTime(System.currentTimeMillis())
                            .copyBizTags()
                            .build();
                    // Start requests must not be removed by the dedup filter.
                    crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(filter);
                    // Record the menu breadcrumb (e.g. [first, second]) as the path field.
                    crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(Field_Path, Arrays.asList(firstLevelMenu,secondLevelMenu));
                    allItemRecords.add(crawlerRequestRecord);
                }
            }
        } catch (Exception e) {
            logger.error(e.getMessage(),e);
        }
    }

    /**
     * Tags the record as a news-section request, then parses it either as an
     * article detail page or as a list page depending on its request type.
     */
    private void infoXCarParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page) {
        crawlerRecord.tagsCreator().bizTags().addDomain(domain());
        crawlerRecord.tagsCreator().bizTags().addSite(site);
        crawlerRecord.tagsCreator().bizTags().addSiteBiz(news_site_biz);

        if (crawlerRecord.tagsCreator().requestTags().hasRequestType(turnPageItem)) {
            // detail page
            articleCommonParseLinks(crawlerRequestRecords, crawlerRecord, page);
            return;
        }
        if (crawlerRecord.tagsCreator().requestTags().hasRequestType(turnPage)) {
            // list page
            listCommonParseLinks(crawlerRequestRecords, crawlerRecord, page);
        }
    }
    /** The SUV section shares the info section's page layout, so its parser is reused. */
    private void suvXCarParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){
        infoXCarParseLinks(crawlerRequestRecords,crawlerRecord,page);
    }
    /** Stub: the tags.xcar.com.cn section is not crawled (its URL pattern is commented out). */
    private void tagsXCarParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){

    }
    /**
     * Tags the record as a news-section request for the new-energy (green)
     * sub-site, then parses it as either an article detail page or a
     * green-specific JSON list page.
     */
    private void greenXCarParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){
        crawlerRecord.tagsCreator().bizTags().addDomain(domain());
        crawlerRecord.tagsCreator().bizTags().addSite(site);
        crawlerRecord.tagsCreator().bizTags().addSiteBiz(news_site_biz);

        if (crawlerRecord.tagsCreator().requestTags().hasRequestType(turnPageItem)) {
            // detail page
            articleCommonParseLinks(crawlerRequestRecords, crawlerRecord, page);
            return;
        }
        if (crawlerRecord.tagsCreator().requestTags().hasRequestType(turnPage)) {
            // list page (JSON API)
            listGreenXCarParseLinks(crawlerRequestRecords, crawlerRecord, page);
        }
    }
    /** The buying-guide (newcar) section shares the info section's layout, so its parser is reused. */
    private void newCarXCarParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){
        infoXCarParseLinks(crawlerRequestRecords,crawlerRecord,page);
    }
    /** The test-drive section shares the info section's layout, so its parser is reused. */
    private void driveXCarParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){
        infoXCarParseLinks(crawlerRequestRecords,crawlerRecord,page);
    }
    /**
     * Tags the record as a video-section request (xtv), then parses it as
     * either a video detail page or a video list page.
     */
    private void xtvXCarParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){
        crawlerRecord.tagsCreator().bizTags().addDomain(domain());
        crawlerRecord.tagsCreator().bizTags().addSite(site);
        crawlerRecord.tagsCreator().bizTags().addSiteBiz(video_site_biz);

        if (crawlerRecord.tagsCreator().requestTags().hasRequestType(turnPageItem)) {
            // detail page
            articleXtvXcarParseLinks(crawlerRequestRecords, crawlerRecord, page);
            return;
        }
        if (crawlerRecord.tagsCreator().requestTags().hasRequestType(turnPage)) {
            // list page
            listXtvXCarParseLinks(crawlerRequestRecords, crawlerRecord, page);
        }
    }
    /**
     * Handles one page of the comment-list JSON API: when the response says
     * more comments exist, enqueues a turn-page request for the next page.
     * A malformed response (including a URL without a query string) is logged
     * and the request is scheduled for retry.
     */
    private void newsArticleCommentParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){
        try {
            Json commentListJson = new Json(page.getRawText());
            String hasMore = commentListJson.jsonPath($_type + ".config.hasMore").get();
            String cid = commentListJson.jsonPath($_type + ".news.cid").get();
            String ctype = commentListJson.jsonPath($_type + ".news.ctype").get();

            // Extract the current "page" number from the request's query string.
            List<NameValuePair> parameters = URLEncodedUtils.parse(page.getRequest().getUrl().split("\\?")[1], StandardCharsets.UTF_8);
            String pageNumber = null;
            for (NameValuePair parameter : parameters) {
                if ("page".equals(parameter.getName())){
                    pageNumber = parameter.getValue();
                }
            }
            // hasMore == "1" means at least one more page of comments exists.
            if ("1".equals(hasMore) && StringUtils.isNotBlank(pageNumber)){
                CrawlerRequestRecord crawlerCommentTurnPageRequestRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRecord)
                        .httpUrl(String.format(newsArticleCommentUrlFormat,cid,ctype,Integer.parseInt(pageNumber) + 1,System.currentTimeMillis()))
                        .httpHeads(page.getRequest().getHeaders())
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .needWashed(true)
                        .build();
                crawlerRequestRecords.add(crawlerCommentTurnPageRequestRecord);
            }

        } catch (Exception e) {
            // Bug fix: keep the full stack trace (the original logged only the message).
            logger.error("xcar news comment list url {} download failed",page.getRequest().getUrl(),e);
            addCrawlerRecords(crawlerRequestRecords, crawlerRecord);
        }
    }

    /** Washes an info-section article page: article/comment data first, then interaction data. */
    private void infoXCarArticleWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord, HttpPage page){

        articleCommentWashPage(crawlerDatas, crawlerRecord, page);

        interactionCommentWashPage(crawlerDatas, crawlerRecord, page);
    }
    /** The SUV section shares the info section's layout, so its wash routine is reused. */
    private void suvXCarArticleWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord, HttpPage page){
        infoXCarArticleWashPage(crawlerDatas, crawlerRecord, page);
    }
    /** Stub: the tags.xcar.com.cn section is not crawled (its URL pattern is commented out). */
    private void tagsXCarArticleWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord, HttpPage page){}
    /** The new-energy (green) section reuses the info section's wash routine. */
    private void greenXCarArticleWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord, HttpPage page){
        infoXCarArticleWashPage(crawlerDatas, crawlerRecord, page);
    }
    /** The buying-guide (newcar) section reuses the info section's wash routine. */
    private void newCarXCarArticleWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord, HttpPage page){
        infoXCarArticleWashPage(crawlerDatas, crawlerRecord, page);
    }
    /** The test-drive section reuses the info section's wash routine. */
    private void driveXCarArticleWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord, HttpPage page){
        infoXCarArticleWashPage(crawlerDatas, crawlerRecord, page);
    }
    /** Washes an xtv video page: video-specific article data first, then interaction data. */
    private void xtvXCarArticleWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord, HttpPage page){
        articleXtcXcarWashPage(crawlerDatas, crawlerRecord, page);

        interactionCommentWashPage(crawlerDatas, crawlerRecord, page);
    }

    /**
     * Washes one page of the comment-list JSON into CrawlerData results: one
     * "comment" record plus one "interaction" record (likes) per list entry.
     * Entries that fail to wash are logged and skipped individually so one bad
     * comment does not lose the rest of the page.
     */
    private void commentCommonWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord, HttpPage page){
        Json commentsJson = new Json(page.getRawText());
        List<String> comments = commentsJson.jsonPath($_type + ".list").all();
        String cid = commentsJson.jsonPath($_type + ".news.cid").get();
        String ctype = commentsJson.jsonPath($_type + ".news.ctype").get();
        if (comments != null){
            for (String comment : comments) {
                Json commentJson = new Json(comment);
                String commentId = commentJson.jsonPath($_type + ".id").get();
                try {
                    String siteBiz = crawlerRecord.tagsCreator().bizTags().siteBiz();
                    // Comment record; ids follow the "<domain>-<site>-<siteBiz>-<type>-<id>" convention,
                    // parented to the article (cid).
                    CrawlerData crawlerCommentData = CrawlerData.builder()
                            .data(crawlerRecord,page)
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), site,siteBiz, CrawlerEnum.CrawlerDataType.comment.enumVal(),commentId))
                            .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), site, siteBiz,CrawlerEnum.CrawlerDataType.article.enumVal(),cid))
                            .releaseTime(dateToTimestamp(commentJson.jsonPath($_type + ".pub_time").get()))
                            .addContentKV(Field_Author,commentJson.jsonPath($_type + ".user_name").get())
                            .addContentKV(Field_Author_Id,commentJson.jsonPath($_type + ".user_id").get())
                            .addContentKV(Field_Content,commentJson.jsonPath($_type + ".conts").get())
                            .addContentKV(Field_Floor,commentJson.jsonPath($_type + ".layout").get())
                            .url(String.format(newsArticleCommentWebUrl,cid,ctype))
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                            .requestLabelTag(result)
                            .requestLabelTag(filter)
                            .copyBizTags()
                            .build();

                    // Interaction record (likes count), parented to the comment record above.
                    CrawlerData crawlerCommentInteractionData = CrawlerData.builder()
                            .data(crawlerRecord,page)
                            .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), site,siteBiz, CrawlerEnum.CrawlerDataType.interaction.enumVal(),commentId))
                            .parentId(crawlerCommentData.getDataId())
                            .releaseTime(crawlerCommentData.getReleaseTime())
                            .addContentKV(Field_I_Likes,commentJson.jsonPath($_type + ".ups").get())
                            .url(String.format(newsArticleCommentWebUrl,cid,ctype))
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                            .requestLabelTag(result)
                            .requestLabelTag(filter)
                            .copyBizTags()
                            .build();
                    crawlerDatas.add(crawlerCommentData);
                    crawlerDatas.add(crawlerCommentInteractionData);
                } catch (Exception e) {
                    // Bug fix: keep the full stack trace (the original logged only the message).
                    logger.error("xcar article comment page wash error",e);
                }
            }
        }
    }

    /**
     * Parses an article-list HTML page: emits one item request per list entry
     * plus a turn-page request for the next list page (when one exists).
     * A bad list entry is logged and skipped without losing the rest.
     */
    private void listCommonParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){
        List<Selectable> nodes = page.getHtml().xpath("//li[@class=\"clearfix moreImgSize\"]").nodes();
        for (Selectable node : nodes) {
            try {
                String releaseTimeStr = node.xpath(".//a[@class=\"iconOne\"]/text()").get();
                // likeViews appears to hold [likes, views] in list order -- TODO confirm against the live page
                List<String> likeViews = node.xpath(".//span[@class=\"remark_object\"]/a/text()").all();
                String articleUrl = "https:"+node.xpath(".//dt[@class=\"listCon_title\"]/a/@href").get()+ "?viewtype=all";
                CrawlerRequestRecord crawlerItemRequestRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRecord)
                        .httpUrl(articleUrl)
                        .httpHeads(page.getRequest().getHeaders())
                        .releaseTime(dateToTimestamp(releaseTimeStr))
                        .copyBizTags()
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                        .build();
                crawlerItemRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
                crawlerItemRequestRecord.getHttpRequest().addExtra(Field_I_Likes,likeViews.get(0));
                crawlerItemRequestRecord.getHttpRequest().addExtra(Field_I_Views,likeViews.get(1));
                crawlerItemRequestRecord.getHttpRequest().addExtra(Field_I_Comments,"0");
                crawlerItemRequestRecord.getHttpRequest().addExtra(Field_Urls,page.getRequest().getUrl());
                crawlerRequestRecords.add(crawlerItemRequestRecord);
            } catch (Exception e) {
                // Bug fix: keep the full stack trace (the original logged only the message).
                logger.error("xcar parse links error",e);
            }
        }

        // Bug fix: only enqueue a next-page request when a next-page link exists.
        // Previously a record with a null URL was added unconditionally
        // (the sibling listXtvXCarParseLinks already guards this way).
        String nextPageUrl = page.getHtml().xpath("//a[@class=\"page_down\"][@rel=\"nofollow\"]/@href").get();
        if (StringUtils.isNotBlank(nextPageUrl)) {
            CrawlerRequestRecord crawlerTurnPageRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(nextPageUrl)
                    .httpHeads(page.getRequest().getHeaders())
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            crawlerRequestRecords.add(crawlerTurnPageRequestRecord);
        }
    }

    /**
     * Parses one page of the green (new-energy) section's JSON list API:
     * enqueues the next-page request (by rewriting the page_num query
     * parameter) and one item request per content entry.
     */
    private void listGreenXCarParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){

        try {
            Json rawTextJson = new Json(page.getRawText());
            String count = rawTextJson.jsonPath($_type + ".count").get();
            String page_size = rawTextJson.jsonPath($_type + ".page_size").get();
            String page_num = rawTextJson.jsonPath($_type + ".page_num").get();

            int totalPage = (int) Math.ceil(Double.parseDouble(count) / Double.parseDouble(page_size));
            // NOTE(review): "<=" issues one request for page totalPage+1; presumably the API
            // returns an empty list there -- confirm before tightening to "<".
            if (Integer.parseInt(page_num) <= totalPage){

                // Rebuild the URL with page_num incremented; all other query params are kept.
                String url = page.getRequest().getUrl().split("\\?")[0];
                String parameterStr = page.getRequest().getUrl().split("\\?")[1];
                List<NameValuePair> parameters = URLEncodedUtils.parse(parameterStr, StandardCharsets.UTF_8);
                List<NameValuePair> nextPageUrlParameters = new ArrayList<>();
                for (NameValuePair parameter : parameters) {
                    String name = parameter.getName();
                    String value = parameter.getValue();
                    if ("page_num".equals(name)){
                        value = String.valueOf(Integer.parseInt(page_num) + 1);
                    }
                    nextPageUrlParameters.add(new BasicNameValuePair(name,value));
                }
                String nextPageUrl = url + "?" + URLEncodedUtils.format(nextPageUrlParameters, "utf-8");

                CrawlerRequestRecord crawlerTurnPageRequestRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRecord)
                        .httpUrl(nextPageUrl)
                        .httpHeads(page.getRequest().getHeaders())
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .build();
                crawlerRequestRecords.add(crawlerTurnPageRequestRecord);
            }

            List<String> contents = rawTextJson.jsonPath($_type + ".content_list").all();
            if (contents != null && contents.size() > 0){
                for (String content : contents) {
                    Json contentJson = new Json(content);
                    String articleUrl = contentJson.jsonPath($_type + ".online_url").get()+ "?viewtype=all";
                    String views = contentJson.jsonPath($_type + ".comment_num").get();
                    String likes = contentJson.jsonPath($_type + ".ups_num").get();
                    String releaseTimeStr = contentJson.jsonPath($_type + ".output_time_h").get();
                    CrawlerRequestRecord crawlerItemRequestRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRecord)
                            .httpUrl(articleUrl)
                            .httpHeads(page.getRequest().getHeaders())
                            .releaseTime(dateToTimestamp(releaseTimeStr))
                            .copyBizTags()
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                            .build();
                    crawlerItemRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
                    crawlerItemRequestRecord.getHttpRequest().addExtra(Field_I_Views,views);
                    crawlerItemRequestRecord.getHttpRequest().addExtra(Field_I_Likes,likes);
                    crawlerItemRequestRecord.getHttpRequest().addExtra(Field_I_Comments,"0");
                    crawlerItemRequestRecord.getHttpRequest().addExtra(Field_Urls,page.getRequest().getUrl());
                    crawlerRequestRecords.add(crawlerItemRequestRecord);
                }
            }

        } catch (Exception e) {
            // Bug fix: keep the full stack trace (the original logged only the message).
            logger.error("green xcar parse links error",e);
        }
    }

    /**
     * Parses an xtv (video) list HTML page: emits one item request per video
     * entry whose href is a relative /show... link, plus a turn-page request
     * when a next-page link is present.
     */
    private void listXtvXCarParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){

        List<Selectable> nodes = page.getHtml().xpath("//div[@class=\"list_mod\"]//dl").nodes();
        for (Selectable node : nodes) {
            try {
                String articleUrl = node.xpath(".//a/@href").get();
                // Bug fix: guard against a missing href (previously a null here threw an
                // uncaught NullPointerException and aborted the whole list).
                if (articleUrl != null && articleUrl.startsWith("/show")) {
                    articleUrl = "https://xtv.xcar.com.cn" + articleUrl;
                    String releaseTime = node.xpath(".//span[@class=\"time\"]").get();
                    CrawlerRequestRecord crawlerItemRequestRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRecord)
                            .httpUrl(articleUrl)
                            .httpHeads(page.getRequest().getHeaders())
                            .releaseTime(dateToTimestamp(releaseTime))
                            .copyBizTags()
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                            .build();
                    crawlerItemRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
                    crawlerItemRequestRecord.getHttpRequest().addExtra(Field_Urls, page.getRequest().getUrl());
                    crawlerRequestRecords.add(crawlerItemRequestRecord);
                }
            } catch (Exception e) {
                // Bug fix: catch Exception (was ParseException only) so one bad entry
                // cannot abort the loop; matches the sibling list parsers. Also fixes
                // the "xvt" typo and logs the full stack trace.
                logger.error("xtv xcar parse links error", e);
            }
        }

        String nextPage = StringEscapeUtils.unescapeHtml(page.getHtml().xpath("//a[@class=\"page_down\"]/@href").get());
        if (StringUtils.isNotBlank(nextPage)){
            // NOTE(review): the "/" prefix assumes nextPage is relative without a leading
            // slash -- confirm against the live page markup.
            String nextPageUrl = "https://xtv.xcar.com.cn/"+ nextPage;
            CrawlerRequestRecord crawlerTurnPageRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(nextPageUrl)
                    .httpHeads(page.getRequest().getHeaders())
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            crawlerRequestRecords.add(crawlerTurnPageRequestRecord);
        }

    }

    /**
     * Parses an article detail page: verifies the page carries article data
     * (otherwise retries the download), records the comment count as a request
     * extra, and -- when comment crawling is scheduled -- builds the first
     * comment-API request with filter information copied from the business tags.
     */
    private void articleCommonParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){
        try {
            // An article page without data info is treated as a broken download.
            if (StringUtils.isBlank(articleDataInfo(page))){
                throw new Exception("article data info is null!");
            }
        } catch (Exception e) {
            logger.error("xcar news download article {} page has error: {},will retry",page.getRequest().getUrl(),e.getMessage());
            addCrawlerRecords(crawlerRequestRecords,crawlerRecord);
            return;
        }

        // Look for the comment widget link inside the article body.
        List<Selectable> nodes = page.getHtml().xpath("//div[@class=\"contents_760_wrap\"]//a").nodes();
        for(Selectable node : nodes){
            String commentUrl = node.xpath("./@href").get();
            if (StringUtils.isNotBlank(commentUrl)&&commentUrl.startsWith("//comment.xcar.com.cn/comment.php")) {
                // The link text starts with the comment count -- TODO confirm against the live page.
                String comments = node.xpath("./text()").get().trim().split(" ")[0].trim();
                crawlerRecord.getHttpRequest().addExtra(Field_I_Comments,comments); // comment count

                CategoryTag categoryTag = crawlerRecord.tagsCreator().scheduleTags().getCategoryTag();
                if (categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null) { // generate comment records

                    // Filter info must have been propagated on the business tags; without it
                    // comment records cannot be deduplicated, so bail out.
                    if(!crawlerRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")){
                        logger.error("xcar news crawler comment need to filter information!");
                        return;
                    }

                    KVTag filterInfoTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                    CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(),CrawlerRecord.class);

                    // Pull nid/ctype out of the comment widget link's query string.
                    List<NameValuePair> parameters = URLEncodedUtils.parse(commentUrl.split("\\?")[1], Charset.forName("utf-8"));
                    String nid=null;
                    String ctype=null;
                    for (NameValuePair parameter : parameters) {
                        if ("nid".equals(parameter.getName())){
                            nid  = parameter.getValue();
                        }
                        if ("ctype".equals(parameter.getName())){
                            ctype  = parameter.getValue();
                        }
                    }

                    // First page of the comment API for this article.
                    CrawlerRequestRecord crawlerCommentTurnPageRequestRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRecord)
                            .httpUrl(String.format(newsArticleCommentUrlFormat,nid,ctype,1,System.currentTimeMillis()))
                            .httpHeads(page.getRequest().getHeaders())
                            .releaseTime(System.currentTimeMillis())
                            .needWashed(true)
                            .notFilterRecord()
                            .copyBizTags()
                            .build();
                    // Carry the original record's filter state onto the comment request.
                    crawlerCommentTurnPageRequestRecord.setFilter(filterInfoRecord.getFilter());
                    crawlerCommentTurnPageRequestRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                    crawlerCommentTurnPageRequestRecord.setTurnPageFilterInfo(filterInfoRecord.getTurnPageFilterInfo());
                    crawlerRequestRecords.add(crawlerCommentTurnPageRequestRecord);
                }
            }
        }
    }

    /**
     * Parses an xtv.xcar video-article page: stores the comment/view counters as request
     * extras and, when a comment label is scheduled, emits a comment turn-page request
     * seeded with the filter information stashed in the "comment_record_filter_info" biz tag.
     * On a broken download the record is re-queued for retry.
     */
    private void articleXtvXcarParseLinks(List<CrawlerRequestRecord> crawlerRequestRecords,CrawlerRequestRecord crawlerRecord, HttpPage page){
        String dataInfo;
        try {
            dataInfo = articleDataInfo(page);
            // Reuse the value extracted above instead of re-running the regex scan (the
            // original called articleDataInfo(page) a second time here).
            if (StringUtils.isBlank(dataInfo)){
                throw new Exception("article data info is null!");
            }
        } catch (Exception e) {
            logger.error("xcar news download article {} page has error: {},will retry",page.getRequest().getUrl(),e.getMessage());
            addCrawlerRecords(crawlerRequestRecords,crawlerRecord);
            return;
        }

        Json dataInfoJson = new Json(dataInfo);

        String comments = page.getHtml().xpath("//div[@class=\"otherBox\"]/span[@class=\"commentsNum\"]").get();
        String views = page.getHtml().xpath("//div[@class=\"otherBox\"]/span[@class=\"playNum\"]").get();
        // Guard: a missing counter node (or one without the "条" suffix) previously caused
        // an uncaught NullPointerException / StringIndexOutOfBoundsException below.
        if (comments == null || comments.indexOf("条") < 0) {
            logger.error("xcar news article {} page has no comment counter,will retry",page.getRequest().getUrl());
            addCrawlerRecords(crawlerRequestRecords,crawlerRecord);
            return;
        }
        comments = comments.substring(0,comments.indexOf("条")); // e.g. "12条评论" -> "12"
        crawlerRecord.getHttpRequest().addExtra(Field_I_Comments,comments); //评论数
        crawlerRecord.getHttpRequest().addExtra(Field_I_Views,views); //浏览量
        if(!"0".equals(comments)){
            CategoryTag categoryTag = crawlerRecord.tagsCreator().scheduleTags().getCategoryTag();
            if (categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null) { //生成评论record

                // The comment record inherits its filter configuration from a tag that an
                // upstream step must have attached; without it we cannot build the request.
                if(!crawlerRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")){
                    logger.error("xcar news crawler comment need to filter information!");
                    return;
                }

                KVTag filterInfoTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(),CrawlerRecord.class);

                CrawlerRequestRecord crawlerCommentTurnPageRequestRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRecord)
                        // ctype is fixed to "5" for xtv video articles; page starts at 1.
                        .httpUrl(String.format(newsArticleCommentUrlFormat, dataInfoJson.jsonPath($_type + ".content_id").get(),"5",1,System.currentTimeMillis()))
                        .httpHeads(page.getRequest().getHeaders())
                        .releaseTime(System.currentTimeMillis())
                        .needWashed(true)
                        .copyScheduleTags()
                        .copyBizTags()
                        .notFilterRecord()
                        .build();
                crawlerCommentTurnPageRequestRecord.setFilter(filterInfoRecord.getFilter());
                crawlerCommentTurnPageRequestRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                crawlerCommentTurnPageRequestRecord.setTurnPageFilterInfo(filterInfoRecord.getTurnPageFilterInfo());
                crawlerRequestRecords.add(crawlerCommentTurnPageRequestRecord);
            }
        }
    }

    /**
     * Washes a regular article page into an article {@link CrawlerData}: extracts the
     * related car-series list (de-duplicated by data-id) into a biz tag, plus source,
     * images and body text from the page, and id/author/title/time from the inline
     * data-info JSON. Parse failures on the publish time are logged and the record dropped.
     */
    private void articleCommentWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord, HttpPage page){
        String dataInfo = articleDataInfo(page);
        Json dataInfoJson = new Json(dataInfo);

        //相关车系 (related car series), de-duplicated by data-id
        List<Map<String,String>> seriesList = new ArrayList<>();
        Set<String> seriesIds = new HashSet<>();
        List<Selectable> nodes = page.getHtml().xpath("//div[@class=\"car_info_box clearfix\"]//li").nodes();
        for(Selectable selectable : nodes){
            String seriesId = selectable.xpath("./@data-id").get();
            if (seriesIds.add(seriesId)){ // add() returns false for an already-seen id
                Map<String, String> seriesInfo = new HashMap<>();
                seriesInfo.put("series_name",selectable.xpath("./a/@title").get());
                seriesInfo.put("series_url","https:"+selectable.xpath("./a/@href").get());
                seriesInfo.put("series_id",seriesId);
                seriesList.add(seriesInfo);
            }
        }

        String authorFrom = page.getHtml().xpath("//span[@class=\"media_name\"]//a/text()").get();
        List<String> imageUrls = page.getHtml().xpath("//div[@class=\"zym_artical_wrap\"]//img/@src").all();
        String splitStr = "\\x01";
        // StringBuilder: no shared state here, so the synchronized StringBuffer was unnecessary.
        StringBuilder imageUrlsStr = new StringBuilder();
        for (String imageUrl : imageUrls) {
            imageUrlsStr.append("https:").append(imageUrl).append(splitStr);
        }
        // Strip the trailing separator. With an empty image list the original code did
        // substring(0, lastIndexOf(...)) == substring(0, -1), which threw an uncaught
        // StringIndexOutOfBoundsException.
        String images = imageUrls.isEmpty()
                ? ""
                : imageUrlsStr.substring(0, imageUrlsStr.lastIndexOf(splitStr));
        String content = StringUtils.joinWith(
                "",page.getHtml().xpath("//div[@class=\"zym_artical_wrap\"]/p//text()").all().toArray());
        try {
            String siteBiz = crawlerRecord.tagsCreator().bizTags().siteBiz();
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), site,siteBiz,CrawlerEnum.CrawlerDataType.article.enumVal(), dataInfoJson.jsonPath($_type + ".content_id").get()))
                    .releaseTime(dateToTimestamp(dataInfoJson.jsonPath($_type + ".publish_time").get()))
                    .addContentKV(Field_Author, dataInfoJson.jsonPath($_type + ".author_name").get())
                    .addContentKV(Field_Author_Id, dataInfoJson.jsonPath($_type + ".author_id").get())
                    .addContentKV(Field_Title, dataInfoJson.jsonPath($_type + ".content_title").get())
                    .addContentKV(Field_Urls, (String) page.getRequest().getExtras().get(Field_Urls))
                    .addContentKV(Field_Source, authorFrom)
                    .addContentKV(Field_Content, content.trim())
                    .addContentKV(Field_Images, images)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .copyBizTags()
                    .build();
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Series,seriesList);
            crawlerDatas.add(crawlerData);
        } catch (ParseException e) {
            logger.error("xcar news article page wash error:{}",e.getMessage());
        }
    }

    /**
     * Washes an xtc.xcar video-article page into an article {@link CrawlerData}.
     * The single related series (if any) comes from the dd/h2 anchor; article metadata
     * comes from the inline data-info JSON. Publish-time parse failures are logged
     * and the record dropped.
     */
    private void articleXtcXcarWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord, HttpPage page){
        String dataInfo = articleDataInfo(page);
        Json dataInfoJson = new Json(dataInfo);

        //相关车系 (related car series); left empty when the anchor is absent
        Map<String, String> seriesInfo = new HashMap<>();
        String seriesUrl = page.getHtml().xpath("//dd/h2/a/@href").get();
        if (StringUtils.isNotBlank(seriesUrl)){
            String[] seriesUrls = seriesUrl.split("/");
            seriesInfo.put("series_name",page.getHtml().xpath("//dd/h2/a/@title").get());
            seriesInfo.put("series_url","https:"+seriesUrl);
            seriesInfo.put("series_id",seriesUrls[seriesUrls.length-1]); // last path segment is the series id
        }

        String content = page.getHtml().xpath("//div[@class=\"infoBox\"]/p[1]/text()").get();
        // Guard: a page without the first infoBox paragraph previously caused an
        // uncaught NullPointerException on content.trim() below.
        content = content == null ? "" : content.trim();
        try {
            String siteBiz = crawlerRecord.tagsCreator().bizTags().siteBiz();
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), site,siteBiz, CrawlerEnum.CrawlerDataType.article.enumVal(), dataInfoJson.jsonPath($_type + ".content_id").get()))
                    .releaseTime(dateToTimestamp(dataInfoJson.jsonPath($_type + ".publish_time").get()))
                    .addContentKV(Field_Author, dataInfoJson.jsonPath($_type + ".author_name").get())
                    .addContentKV(Field_Author_Id, dataInfoJson.jsonPath($_type + ".author_id").get())
                    .addContentKV(Field_Title, dataInfoJson.jsonPath($_type + ".content_title").get())
                    .addContentKV(Field_Urls, (String) page.getRequest().getExtras().get(Field_Urls))
                    .addContentKV(Field_Content, content)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .copyBizTags()
                    .build();
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Series,Arrays.asList(seriesInfo));
            crawlerDatas.add(crawlerData);
        } catch (ParseException e) {
            logger.error("xcar news article page wash error:{}",e.getMessage());
        }
    }

    /**
     * Washes an article page into an interaction record that carries the view / like /
     * comment counters previously stashed as request extras; parentId links it back to
     * the article record sharing the same content_id. Any failure is logged and skipped.
     */
    private void interactionCommentWashPage(List<CrawlerData> crawlerDatas,CrawlerRequestRecord crawlerRecord,
                                            HttpPage page){

        Json articleJson = new Json(articleDataInfo(page));

        try {
            String siteBiz = crawlerRecord.tagsCreator().bizTags().siteBiz();
            String domain = crawlerRecord.getDomain();
            String contentId = articleJson.jsonPath($_type + ".content_id").get();

            CrawlerData interactionData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", domain, site, siteBiz, CrawlerEnum.CrawlerDataType.interaction.enumVal(), contentId))
                    .parentId(StringUtils.joinWith("-", domain, site, siteBiz, CrawlerEnum.CrawlerDataType.article.enumVal(), contentId))
                    .releaseTime(dateToTimestamp(articleJson.jsonPath($_type + ".publish_time").get()))
                    .addContentKV(Field_I_Views, (String) page.getRequest().getExtras().get(Field_I_Views))
                    .addContentKV(Field_I_Likes, (String) page.getRequest().getExtras().get(Field_I_Likes))
                    .addContentKV(Field_I_Comments, (String) page.getRequest().getExtras().get(Field_I_Comments))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                    .copyBizTags()
                    .build();
            crawlerDatas.add(interactionData);
        } catch (Exception e) {
            logger.error("xcar news interaction page wash error:{}",e.getMessage());
        }
    }

    /**
     * Extracts the inline "key: '...', data: {...}" JavaScript snippet embedded in the
     * raw page text and returns just the payload after the "data: " marker, or null
     * when the snippet is not present on the page.
     */
    private String articleDataInfo(HttpPage page){
        Matcher matcher = Pattern.compile("key: '\\S*',\\s*data:\\s*\\{([^}])*\\}").matcher(page.getRawText());
        if (!matcher.find()) {
            return null;
        }
        String snippet = matcher.group();
        String marker = "data: ";
        // NOTE(review): assumes the matched snippet contains the literal "data: " with a
        // single space; a "data:{" variant (permitted by the regex) would mis-slice here.
        return snippet.substring(snippet.indexOf(marker) + marker.length());
    }

    /**
     * Converts the site's assorted date strings — relative forms ("刚刚", "3秒前",
     * "5分钟前", "2小时前", "2小时30分钟前", "今天/昨天/前天 HH:mm", "MM月dd日",
     * "MM-dd") and the absolute formats listed in {@link TimeForamtEnum} — into
     * epoch milliseconds.
     *
     * @param dataStr raw date text scraped from the page
     * @return epoch millis
     * @throws ParseException when the string matches none of the known formats
     */
    public long dateToTimestamp(String dataStr) throws ParseException {
        String regEx="[^0-9]+"; // splits the digit runs out of a relative-time string
        Pattern pattern = Pattern.compile(regEx);

        if (dataStr.equals("刚刚")){
            return System.currentTimeMillis();
        } else if (Pattern.matches("\\d+秒前",dataStr)){
            // \d+ (was \d*): a digit-less "秒前" now falls through to the strict parse and
            // raises the declared ParseException instead of an unchecked NumberFormatException.
            int number = Integer.parseInt(pattern.split(dataStr)[0]);
            return (System.currentTimeMillis() - number*1000L);
        } else if (Pattern.matches("\\d+分钟前",dataStr)){
            int number = Integer.parseInt(pattern.split(dataStr)[0]);
            return (System.currentTimeMillis() - number*60*1000L);
        } else if (Pattern.matches("\\d+小时前",dataStr)){
            int number = Integer.parseInt(pattern.split(dataStr)[0]);
            return (System.currentTimeMillis() - number*60*60*1000L);
        } else if (Pattern.matches("\\d+小时\\d+分钟前",dataStr)){
            int hourNumber = Integer.parseInt(pattern.split(dataStr)[0]);
            int minuteNumber = Integer.parseInt(pattern.split(dataStr)[1]); // second field is minutes
            long totalMillis = hourNumber*60*60*1000L + minuteNumber*60*1000L;
            return (System.currentTimeMillis() - totalMillis);
        } else if (dataStr.startsWith("今天")){
            // Replace the day word with today's "yyyy年MM月dd日" prefix, keeping the time part.
            String currentTime = DateFormatUtils.format(System.currentTimeMillis(), TimeForamtEnum.format1.getFormat());
            dataStr = dataStr.replace("今天", currentTime);
        } else if (dataStr.startsWith("昨天")){
            String yesterdayTime = DateFormatUtils.format(System.currentTimeMillis() - 60*60*24*1000L, TimeForamtEnum.format1.getFormat());
            dataStr = dataStr.replace("昨天", yesterdayTime);
        } else if (dataStr.startsWith("前天")){
            String beforeYesterdayTime = DateFormatUtils.format(System.currentTimeMillis() - 2*60*60*24*1000L, TimeForamtEnum.format1.getFormat());
            dataStr = dataStr.replace("前天", beforeYesterdayTime);
        } else if (Pattern.matches("\\d{2}月\\d{2}[日]*", dataStr)){
            // "MM月dd[日]": prefix the current year. Ensure the trailing "日" is present —
            // without it no pattern in TimeForamtEnum matched and the strict parse always failed.
            Calendar cal = Calendar.getInstance();
            dataStr = cal.get(Calendar.YEAR)+"年"+dataStr;
            if (!dataStr.endsWith("日")){
                dataStr = dataStr + "日";
            }
        } else if (Pattern.matches("\\d{2}-\\d{2}", dataStr)){
            Calendar cal = Calendar.getInstance();
            dataStr = cal.get(Calendar.YEAR)+"-"+dataStr;
        }
        return DateUtils.parseDateStrictly(dataStr, TimeForamtEnum.allFormats()).getTime();
    }

    /**
     * Date patterns observed on xcar pages; {@link #allFormats()} feeds them all to
     * the strict date parser. (The historical "Foramt" typo in the name is kept
     * because the enum is public and may be referenced by external callers.)
     */
    public enum TimeForamtEnum {
        format1("yyyy年MM月dd日"),
        format1_1("yyyy年MM月dd日 HH:mm:ss"),
        format1_2("yyyy年MM月dd日 HH:mm"),
        format1_3("yyyy年MM月dd日HH:mm"),
        format1_4("yyyy年MM月dd日 HH点mm分"),
        format1_5("yyyy年MM月dd日 HH点mm分ss秒"),
        format1_6("yyyy年MM月dd日HH点mm分"),
        format1_7("yyyy年MM月dd日HH点mm分ss秒"),


        format4("yyyy-MM-dd HH:mm:ss"),
        format5("yyyy-MM-dd HH:mm"),
        format6("yyyy-MM-dd"),

        format7("yyyy/MM/dd HH:mm:ss"),
        format7_1("MM/dd/yyyy HH:mm:ss"),
        format8("yyyy/MM/dd HH:mm"),
        format9("yyyy/MM/dd"),

        format10("yyyy.MM.dd HH:mm:ss"),
        format11("yyyy.MM.dd HH:mm"),
        format12("yyyy.MM.dd"),

        format13("EEE MMM d HH:mm:ss +0800 yyyy");

        // Immutable after construction (was a mutable non-final field).
        private final String format;

        TimeForamtEnum(String format) {
            this.format = format;
        }

        /** Returns every pattern string, in declaration order. */
        public static String[] allFormats() {
            TimeForamtEnum[] timeForamtEnums = values();
            String[] formats = new String[timeForamtEnums.length];
            for (int count = 0; count < timeForamtEnums.length; count++) {
                formats[count] = timeForamtEnums[count].format;
            }
            return formats;
        }

        /** Returns the date pattern string for this constant. */
        public String getFormat() {
            return format;
        }
    }
}
