package com.chance.cc.crawler.development.scripts.autohome.article;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpConstant;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.*;

/**
 * @author lt
 * @version 1.0
 * @date 2020-11-19 17:54:07
 * @email okprog@sina.com
 */
public class AutoHomeArticleCrawlerScript extends CrawlerCommonScript {

    // Loggers are stateless and thread-safe; declare static final so a single
    // instance is shared across all script instances.
    private static final Logger logger = LoggerFactory.getLogger(AutoHomeArticleCrawlerScript.class);

    /** Site root; relative hrefs from list/article pages are resolved against it. */
    private static final String baseUrl = "https://www.autohome.com.cn/";

    // %s = article id — returns aggregated comment counts for the object ids.
    private static final String commentCountUrlFormat = "https://reply.autohome.com.cn/api/QueryComment/CountsByObjIds?_appid=cms&appid=1&dataType=json&objids=%s";
    // args: (page, articleId) — comment list API, 50 comments per page.
    private static final String commentUrlFormat = "https://reply.autohome.com.cn/api/comments/show.json?count=50&appid=1&replydata=1&order=0&page=%s&id=%s";
    // args: (seriesId, page) — article list for a car series.
    private static final String seriesArticleListUrlFormat = "https://www.autohome.com.cn/%s/0/0-0-%s-0/";

    // URL patterns used to dispatch incoming pages to the matching parse* method.
    private static final String indexRegex = "https?://www\\.autohome\\.com\\.cn/channel2/union/list\\.html";
    private static final String seriesArticleListUrlRegex = "https://www\\.autohome\\.com\\.cn/\\d*/0/0-0-\\d*-0/";
    private static final String listRegex = "https?://www\\.autohome\\.com\\.cn/\\d*/0/\\d*/\\w*\\.html";
    private static final String articleRegex = "https?://www\\.autohome\\.com\\.cn/\\w*/\\d{6}/\\d*\\.html\\?pvareaid=\\d*|https?://www\\.autohome\\.com\\.cn/\\w*/\\d{6}/\\d*\\.html";
    private static final String articlePageRegex = "https?://www\\.autohome\\.com\\.cn/\\w*/\\d{6}/\\d*-\\d*\\.html\\?pvareaid=\\d*|https?://www\\.autohome\\.com\\.cn/\\w*/\\d{6}/\\d*-\\d*\\.html";
    private static final String commentCountUrlRegex = "https://reply\\.autohome\\.com\\.cn/api/QueryComment/CountsByObjIds\\?_appid=cms&appid=1&dataType=json&objids=\\d*";
    private static final String commentRegex = "https://reply\\.autohome\\.com\\.cn/api/comments/show\\.json\\?count=50&appid=1&replydata=1&order=0&page=\\d*&id=\\d*";
    private static final String keysRegex = "https?://\\S*v1/meta/autohome/keys\\S*";

    // Time-unit constants in milliseconds. NOTE(review): none of these are
    // referenced in the visible portion of this file — candidates for removal.
    private static final long ONE_SECOND = 1000L;
    private static final long ONE_MINUTE = 60000L;
    private static final long ONE_HOUR = 3600000L;
    private static final long ONE_DAY = 86400000L;

    // Value of the "site" biz tag this script accepts in crawlerCheck().
    private static final String scriptSite = "article";

    /**
     * Identifier of the site this script handles; the framework uses it to
     * route crawl records here.
     */
    @Override
    public String domain() {
        final String siteDomain = "autohome";
        return siteDomain;
    }

    /**
     * Registers every URL pattern this script knows how to parse, in the same
     * order the original registration used.
     */
    @Override
    public void initUrlRegulars() {
        String[] patterns = {
                indexRegex,
                listRegex,
                articleRegex,
                commentRegex,
                seriesArticleListUrlRegex,
                keysRegex
        };
        for (String pattern : patterns) {
            addUrlRegular(pattern);
        }
    }

    /**
     * Accepts only records whose "site" biz tag equals this script's site
     * ("article"), case-insensitively.
     *
     * @param crawlerRequestRecord incoming record to test
     * @return true when this script should process the record
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // Compare from the non-null constant side: the original
        // crawlerSite.equalsIgnoreCase(...) threw an NPE whenever the "site"
        // tag was absent and getKVTagStrVal returned null.
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }


    /**
     * Prepares the outgoing request. Two responsibilities:
     * 1) If the record carries a domain-result JSON tag, rewrite the request
     *    URL to the article URL embedded in that JSON and mark the record for
     *    parsing/washing.
     * 2) If a keyword support record (matching keysRegex) is present, expand
     *    it into per-series list requests via initKeywordsRecord.
     * Falls back to the superclass behavior when no keyword record is found
     * or expansion produced nothing.
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        HttpRequest httpRequest = requestRecord.getHttpRequest();
        if(requestRecord.tagsCreator().bizTags().hasKVTag(Tag_Field_Domain_Result_Json)){
            // The tag value is a serialized CrawlerDomainUrls whose url field
            // is itself a JSON object holding the target article URL.
            KVTag domainResultJson = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(Tag_Field_Domain_Result_Json);
            CrawlerDomainUrls crawlerDomainUrls = JSON.parseObject(String.valueOf(domainResultJson.getVal()),CrawlerDomainUrls.class);
            String urlContents = crawlerDomainUrls.getUrl();
            JSONObject contentObj = JSONObject.parseObject(urlContents);
            String articleUrl = contentObj.getString(Field_Urls);
            requestRecord.setNeedParsedPage(true);
            requestRecord.setNeedWashPage(true);
            requestRecord.tagsCreator().resultTags().addResultDataType(article);
            requestRecord.tagsCreator().resultTags().addResultDataType(interaction);
            httpRequest.setUrl(articleUrl);
            requestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(Tag_Field_Domain_Result_Json); // remove the consumed tag
        }
        List<CrawlerRecord> allItemRecords = new ArrayList<>();
        if (supportSourceRecords == null || supportSourceRecords.size() < 1) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        // Find the keyword support record; if several match, the last wins.
        CrawlerRequestRecord keywordRecord = null;
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            if (supportSourceRecord.getHttpRequest().getUrl().matches(keysRegex)) {
                keywordRecord = supportSourceRecord;
            }
        }
        if (null == keywordRecord) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        String keywordUrl = keywordRecord.getHttpRequest().getUrl();
        // NOTE(review): this matches(keysRegex) re-check is always true here —
        // keywordRecord was selected by the same pattern above.
        if (keywordUrl.matches(keysRegex)) {
            initKeywordsRecord(requestRecord, allItemRecords, keywordRecord);
        }
        if (allItemRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        return allItemRecords;
    }

    /**
     * Expands the keyword support record's JSON payload into one
     * series-article-list request per keyword (series id).
     * Expected payload: {"status":0,"content":[{"keyword":"<seriesId>"},...]}.
     * Errors are logged and swallowed so a bad payload does not abort the crawl.
     *
     * @param requestRecord  the originating request (used as turn-page parent)
     * @param allItemRecords output list the generated records are appended to
     * @param keywordRecord  support record whose downloaded page holds the keywords
     */
    private void initKeywordsRecord(CrawlerRequestRecord requestRecord, List<CrawlerRecord> allItemRecords, CrawlerRequestRecord keywordRecord) {
        try {
            JSONObject jsonObject = JSONObject.parseObject(keywordRecord.getInternalDownloadPage().getRawText());
            if (jsonObject.getIntValue("status") == 0) {
                JSONArray contents = jsonObject.getJSONArray("content");
                for (Object content : contents) {
                    String keyword = ((JSONObject) content).getString("keyword");
                    // Removed debug leftovers: the original overwrote every
                    // keyword with the hard-coded series id "59" and broke out
                    // of the loop after the first element, so only one fixed
                    // series was ever crawled.
                    String seriesVideoUrl = String.format(seriesArticleListUrlFormat, keyword, 1);
                    CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(requestRecord)
                            .httpUrl(seriesVideoUrl)
                            .recordKey(seriesVideoUrl)
                            .httpHead("user-agent", getRandomUA())
                            .releaseTime(System.currentTimeMillis())
                            .needWashed(false)
                            .needParsed(true)
                            .notFilterRecord()
                            .copyBizTags()
                            .build();
                    crawlerRequestRecord.getHttpRequest().setMethod(HttpConstant.Method.GET);
                    // Carry the series id so list pages can compute next-page URLs.
                    crawlerRequestRecord.tagsCreator().bizTags().addCustomKV("seriesId", keyword);
                    allItemRecords.add(crawlerRequestRecord);
                }
            }
        } catch (Exception e) {
            // Contextual message plus cause; e.getMessage() alone may be null.
            logger.error("failed to expand keyword record into series list requests", e);
        }
    }

    /**
     * Dispatches a downloaded page to the parser matching its URL pattern.
     * Failed downloads (blank body, download error, or a status other than
     * 200/404) are re-queued by returning the record itself un-washed.
     *
     * @return follow-up request records; never null (the original returned
     *         null for unmatched URLs, a null-collection anti-pattern)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        int statusCode = httpPage.getStatusCode();
        String lastRequestUrl = lastRequest.getUrl();
        if (StringUtils.isBlank(httpPage.getRawText()) || !httpPage.isDownloadSuccess() || (statusCode != 200 && statusCode != 404)){
            // Retry: push the record back and strip the filter tag so the
            // re-download is not deduplicated away.
            parsedLinks.add(crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            return parsedLinks;
        }
        if (statusCode == 404){
            logger.error("页面不存在：" + statusCode);
            return parsedLinks;
        }
        // Section index page -> per-section list links.
        if (lastRequestUrl.matches(indexRegex)){
            return parseIndexLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        // Article list page -> item links + next page.
        if (lastRequestUrl.matches(listRegex)){
            return parseListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        // Series article list page.
        if (lastRequestUrl.matches(seriesArticleListUrlRegex)){
            return parseSeriesArticleListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        // Article page -> pagination, comment-count and comment links.
        if (lastRequestUrl.matches(articleRegex)){
            return parseArticleLinks(crawlerRequestRecord, httpPage, parsedLinks, lastRequest, lastRequestUrl);
        }
        // Comment API page -> next comment page.
        if (lastRequestUrl.matches(commentRegex)){
            return parseCommentLinks(crawlerRequestRecord, httpPage, parsedLinks, lastRequest);
        }
        // Unmatched URL: return the (empty) list instead of null so callers
        // never have to null-check the result.
        return parsedLinks;
    }

    /**
     * Parses a series article-list page: schedules the next page while a
     * "next" link exists, and emits one item record per article row with
     * views/title/listUrl stashed in request extras.
     */
    private List<CrawlerRequestRecord> parseSeriesArticleListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String seriesId = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("seriesId");
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        // URL shape .../<seriesId>/0/0-0-<page>-0/ — the page number is the
        // third "-"-separated token.
        int curPage = Integer.parseInt(httpRequestUrl.split("-")[2]);
        Html html = httpPage.getHtml();
        String next = html.xpath("//a[@class=\"page-item-next\"]/text()").get();
        if (StringUtils.isNotBlank(next)){
            String nextPageUrl = String.format(seriesArticleListUrlFormat, seriesId, (curPage + 1));
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(nextPageRecord);
        }
        List<Selectable> selectables = html.xpath("//div[@class=\"cont-info\"]/ul/li").nodes();
        for (Selectable selectable : selectables) {
            String url = selectable.xpath("./h3/a/@href").get();
            if (StringUtils.isBlank(url)){
                continue;
            }
            String itemUrl = "https://www.autohome.com.cn" + url;
            String pubTime = selectable.xpath("./p[@class=\"name-tx\"]/span[2]/text()").get();
            String views = selectable.xpath("./p[@class=\"name-tx\"]/span[3]/text()").get();
            String title = selectable.xpath("./h3/a/text()").get();
            Map<String, Object> extras = new HashMap<>();
            extras.put("views", views);
            extras.put("title", title);
            extras.put("listUrl", httpRequestUrl);
            try {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd").getTime())
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .build();
                itemRecord.getHttpRequest().setExtras(extras);
                parsedLinks.add(itemRecord);
            } catch (Exception e) {
                // Original misused SLF4J: the message was passed as a format
                // argument and the exception was not recorded as the cause.
                logger.error("parse date error, pubTime=[{}], itemUrl=[{}]", pubTime, itemUrl, e);
            }
        }
        return parsedLinks;
    }

    /**
     * Parses a comment-API JSON page and schedules the next comment page
     * while more pages remain (50 comments per page, per commentUrlFormat).
     * The article id is taken from the "articleKey" request extra set by
     * parseArticleLinks.
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, HttpRequest lastRequest) {
        try{
            // Probe the expected JSON paths; any missing field throws, which
            // is treated as a failed/blocked download (exception-as-validation).
            httpPage.getJson().jsonPath($_type + ".commentlist").all();
            httpPage.getJson().jsonPath($_type + ".commentcountall").all();
            httpPage.getJson().jsonPath($_type + ".commentcount").all();
            httpPage.getJson().jsonPath($_type + ".page").all();
        }catch (Exception e){
            logger.error("comment url is download error!detail is [{}]",JSONObject.toJSONString(httpPage));
            // Retry: re-queue the record un-washed and drop the filter tag so
            // the re-download is not deduplicated away.
            parsedLinks.add(crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            return parsedLinks;
        }
        if(httpPage.getJson().jsonPath($_type + ".commentlist").all().size() < 1){
            logger.error("requestUrl is download is complete[{}]",JSONObject.toJSONString(httpPage));
        }
        //根据返回的json结果计算下一页 (compute the next page from the returned JSON)
        Map<String, Object> articleExtras = lastRequest.getExtras();
        String articleKey = (String) articleExtras.get("articleKey");
        JSONObject jsonObject = JSONObject.parseObject(httpPage.getRawText());
        int currentPage = jsonObject.getIntValue("page");
        int commentCount = jsonObject.getIntValue("commentcount");
        // Ceiling division: 50 comments per page (matches count=50 in the URL).
        int maxPage = (int) Math.ceil(commentCount / 50.0);
        if (currentPage < maxPage){
            String commentUrl = String.format(commentUrlFormat,(currentPage + 1), articleKey);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(commentUrl)
                    .httpUrl(commentUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .resultLabelTag(comment)
                    .resultLabelTag(interaction)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.turnPageItem)
                    .copyBizTags()
                    .build();

            // Copy (not share) the extras so pages don't mutate each other's map.
            commentRecord.getHttpRequest().setExtras(copyExtras(articleExtras));
            parsedLinks.add(commentRecord);
        }
        return parsedLinks;
    }

    /**
     * Parses an article page. In order:
     * 1) anti-bot interstitial -> re-queue the record for retry;
     * 2) picture-pagination page (fanhui2 anchor) -> re-request with a cache
     *    buster and stop;
     * 3) multi-page article -> schedule internal downloads for pages 2..n;
     * 4) always schedule the comment-count API;
     * 5) if comment crawling is enabled for this schedule, start the comment
     *    page loop.
     * The article id ("articleKey") is derived from the URL's last path
     * segment and propagated via request extras.
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, HttpRequest lastRequest, String lastRequestUrl) {
        if (httpPage.getRawText().contains("尊敬的用户您好，您的访问出现异常，为确认本次访问为正常用户行为")){
            // Anti-crawler block page: retry un-washed, without the filter tag.
            parsedLinks.add(crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            return parsedLinks;
        }
        crawlerRequestRecord.setNeedWashPage(true);
        Map<String, Object> listExtras = lastRequest.getExtras() == null ? new HashMap<>() : lastRequest.getExtras();
        lastRequest.setExtras(listExtras);
        // ".../<dir>/<yyyyMM>/<id>.html" or ".../<id>-<page>.html" -> "<id>".
        String articleKey = lastRequestUrl.substring(lastRequestUrl.lastIndexOf("/") + 1).split("\\.")[0];
        articleKey = articleKey.contains("-") ? articleKey.split("-")[0] : articleKey;
        listExtras.put("articleKey", articleKey);
        // Picture-pagination layout check: the fanhui2 ("back") anchor only
        // exists on the image-pager variant of the page.
        String fanHui = httpPage.getHtml().xpath("//a[@id=\"fanhui2\"]/text()").get();
        if (StringUtils.isNotBlank(fanHui)){
            // Re-request with a query suffix; 2023231 appears to be a cache
            // buster — TODO confirm its meaning with the original author.
            String itemUrl = lastRequestUrl.split("\\?")[0] + "?" + "2023231";
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(itemUrl)
                    .httpUrl(itemUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .copyBizTags()
                    .resultLabelTag(article)
                    .resultLabelTag(interaction)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.turnPageItem)
                    .notFilterRecord()
                    .build();
            itemRecord.getHttpRequest().setExtras(copyExtras(listExtras));
            crawlerRequestRecord.setNeedWashPage(false);
            parsedLinks.add(itemRecord);
            return parsedLinks;
        }
        String articlePage = httpPage.getHtml().xpath("//span[@class=\"athm-page__count\"]/text()").get();
        if (StringUtils.isNotBlank(articlePage)){ // collect all article pagination links
            List<String> articlePages = httpPage.getHtml().xpath("//div[@class=\"menu-wrap\"]/ul/li/a/@href").all();
            // Drop the first entry (the current/first page, already downloaded).
            // Guarded: the original remove(0) threw IndexOutOfBoundsException
            // when the page counter existed but the menu list came back empty.
            if (!articlePages.isEmpty()) {
                articlePages.remove(0);
            }
            for (String pageUrl : articlePages) {
                String articlePageUrl = baseUrl + pageUrl.substring(1);
                CrawlerRequestRecord itemTurnPageRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(articlePageUrl)
                        .httpUrl(articlePageUrl)
                        .releaseTime(crawlerRequestRecord.getReleaseTime())
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                        .build();
                itemTurnPageRecord.getHttpRequest().setExtras(listExtras);
                parsedLinks.add(itemTurnPageRecord);
            }
        }

        // Comment-count API request.
        String commentCountUrl = String.format(commentCountUrlFormat, articleKey);
        CrawlerRequestRecord commentCountRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .recordKey(commentCountUrl)
                .httpUrl(commentCountUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        commentCountRecord.getHttpRequest().setExtras(listExtras);
        parsedLinks.add(commentCountRecord);

        // Comment list loop — only when the schedule requests comment data.
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
        if (categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null) {
            if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
                logger.error("{} crawler comment need to filter information!", domain());
                return parsedLinks;
            }
            KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
            CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
            // Seed the outer comment loop at page 1.
            String commentUrl = String.format(commentUrlFormat, 1, articleKey);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .needWashed(true)
                    .resultLabelTag(comment)
                    .resultLabelTag(interaction)
                    .copyBizTags()
                    .build();
            commentRecord.setFilter(filterInfoRecord.getFilter());
            commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
            commentRecord.getHttpRequest().setExtras(listExtras);
            parsedLinks.add(commentRecord);
        }
        return parsedLinks;
    }

    /**
     * Parses a plain article-list page: schedules the "next" page when
     * present, then emits one item record per list row with views/title/
     * listUrl stashed in request extras. Pagination-style article URLs are
     * normalized back to their first page.
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();

        String nextPageUrl;
        try {
            nextPageUrl = baseUrl + (httpPage.getHtml().xpath("//a[@class=\"next\"]/@href").get().split("cn/")[1]);
        } catch (Exception e) {
            // No "next" anchor (or malformed href): last page reached.
            nextPageUrl = "";
        }
        if (StringUtils.isNotBlank(nextPageUrl)){
            CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(turnPageRequest);
        }
        List<Selectable> selectables = httpPage.getHtml().xpath("//ul[@class=\"travellist\"]/li").nodes();
        for (Selectable selectable : selectables) {
            // Validate the raw href BEFORE building the absolute URL: the
            // original checked isBlank only after prefixing baseUrl (so the
            // check never fired) and an absent/malformed href threw an
            // uncaught NPE/IndexOutOfBounds that aborted the whole list.
            String href = selectable.xpath("./div/p[@class=\"title\"]/a/@href").get();
            if (StringUtils.isBlank(href) || !href.contains("cn/")) {
                continue;
            }
            String itemUrl = baseUrl + href.split("cn/")[1];
            itemUrl = itemUrl.replace("#", "?");
            if (itemUrl.matches(articlePageRegex)){
                // ".../<id>-<page>.html..." -> ".../<id>.html..." (first page).
                itemUrl = itemUrl.split("-")[0] + ".h" + itemUrl.split("\\.h")[1];
            }
            String pubTime = selectable.xpath("./div/p[@class=\"info\"]/span[1]/text()").get();
            String views = selectable.xpath("./div/p[@class=\"info\"]/span[2]/text()").get();
            String title = selectable.xpath("./div/p[@class=\"title\"]/a/text()").get();
            Map<String, Object> extras = new HashMap<>();
            extras.put("views", views);
            extras.put("title", title);
            extras.put("listUrl", lastRequest.getUrl());
            try {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd").getTime())
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .build();
                itemRecord.getHttpRequest().setExtras(extras);

                parsedLinks.add(itemRecord);
            } catch (Exception e) {
                // Original misused SLF4J: the message was passed as a format
                // argument and the exception was not recorded as the cause.
                logger.error("parse date error, pubTime=[{}], itemUrl=[{}]", pubTime, itemUrl, e);
            }
        }
        return parsedLinks;
    }

    /**
     * Parses the channel index page: for every first-level tag section and
     * each of its second-level tags, schedules the section's list URL
     * (upgraded to https) and attaches the [tag1st, tag2nd] path as a biz tag.
     * (A previous alternative implementation that walked nav-item anchors has
     * been removed — it was dead, commented-out code.)
     */
    private List<CrawlerRequestRecord> parseIndexLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        Html html = httpPage.getHtml();
        List<Selectable> firstNodes = html.xpath("//div[@class=\"content\"]/div[@class=\"tags-section  fn-left\"]").nodes();
        for (Selectable firstNode : firstNodes) {
            String tag1st = firstNode.xpath("./h4/text()").get();
            List<Selectable> secondNodes = firstNode.xpath("./ul/li").nodes();
            for (Selectable secondNode : secondNodes) {
                String tag2nd = secondNode.xpath("./a/text()").get();
                // Force https; some index hrefs are still plain http.
                String listUrl = secondNode.xpath("./a/@href").get().replace("http:", "https:");
                CrawlerRequestRecord indexRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .recordKey(listUrl)
                        .httpUrl(listUrl)
                        .httpConfig(crawlerRequestRecord.getHttpConfig())
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .notFilterRecord()
                        .build();

                // Category path [first-level, second-level] travels with the record.
                List<String> pathTag = new ArrayList<>();
                pathTag.add(tag1st);
                pathTag.add(tag2nd);
                indexRecord.tagsCreator().bizTags().addCustomKV(Field_Path, pathTag);
                parsedLinks.add(indexRecord);
            }
        }
        return parsedLinks;
    }

    /**
     * Merges the results of the internal (sub-page) downloads back onto the
     * main article request's extras before washing:
     * - "sumContent" / "sumImages": article body text and image urls
     *   concatenated across all paginated detail pages;
     * - "comments": total reply count from the comment-count JSON api.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        HttpRequest lastRequest = crawlerRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        Map<String, Object> extras = lastRequest.getExtras();
        // Reset the accumulators so the washers never see stale/null values.
        extras.put("sumContent", "");
        extras.put("sumImages", "");
        if (lastRequestUrl.matches(articleRegex)) {
            for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
                String downloadUrl = internalDownloadRecord.getHttpRequest().getUrl();
                HttpPage internalDownloadPage = internalDownloadRecord.getInternalDownloadPage();
                // Concatenate article text and image urls from each paginated detail sub-page.
                if (downloadUrl.matches(articlePageRegex)) {
                    List<String> allContents = internalDownloadPage.getHtml().xpath("//div[@id=\"articleContent\"]//p//text()").all();
                    extras.put("sumContent", extras.get("sumContent") + joinWithSuffix(allContents, " "));
                    List<String> allImages = internalDownloadPage.getHtml().xpath("//div[@id=\"articleContent\"]//p[@align=\"center\"]//img/@src").all();
                    extras.put("sumImages", extras.get("sumImages") + joinWithSuffix(allImages, ","));
                }
                // Extract the total reply count from the comment-count JSON api.
                if (downloadUrl.matches(commentCountUrlRegex)) {
                    try {
                        JSONObject jsonObject = JSONObject.parseObject(internalDownloadPage.getRawText());
                        int returnCode = jsonObject.getIntValue("returncode");
                        if (0 == returnCode) {
                            String comments = jsonObject.getJSONObject("result").getJSONObject("sum").getString("sumreplycount");
                            extras.put("comments", comments);
                        }
                    } catch (Exception e) {
                        // Malformed/partial JSON: re-queue the record for another pass
                        // instead of washing an incomplete page (deliberate best-effort retry).
                        crawlerRecord.setNeedWashPage(false);
                        crawlerRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                        links.add(crawlerRecord);
                    }
                }
            }
        }
    }

    /**
     * Appends {@code suffix} after every element and returns the concatenation.
     * Keeps the trailing delimiter ("a b c " style), as the washers expect.
     * StringBuilder replaces the old StringBuffer: no synchronization is needed here.
     */
    private static String joinWithSuffix(List<String> items, String suffix) {
        StringBuilder sb = new StringBuilder();
        for (String item : items) {
            sb.append(item).append(suffix);
        }
        return sb.toString();
    }

    /**
     * Dispatches the downloaded page to each washer whose result data type is
     * tagged on the record (article / interaction / comment) and merges
     * everything they produce into a single list, in that fixed order.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> washed = new ArrayList<>();

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            washed.addAll(washArticle(crawlerRequestRecord, httpPage));
        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            washed.addAll(washInteraction(crawlerRequestRecord, httpPage));
        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)) {
            washed.addAll(washComment(crawlerRequestRecord, httpPage));
        }
        return washed;
    }

    /**
     * Washes an article detail page into an article {@link CrawlerData}.
     * Body text / image urls accumulated from the article's other pages by
     * afterInternalDownload (extras "sumContent" / "sumImages") are prepended
     * to the content extracted from this page.
     */
    private List<CrawlerData> washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = lastRequest.getExtras();
        CrawlerBusinessTags bizTags = crawlerRequestRecord.tagsCreator().bizTags();
        CategoryTag categoryTag = bizTags.getCategoryTag();
        Html html = httpPage.getHtml();

        // Article body: text of every <p> under #articleContent, space separated.
        // StringBuilder replaces StringBuffer -- no synchronization is needed here.
        List<String> allContents = html.xpath("//div[@id=\"articleContent\"]//p//text()").all();
        StringBuilder sbContent = new StringBuilder();
        for (String s : allContents) {
            sbContent.append(s).append(" ");
        }
        // defaultString guards against a missing "sumContent" extra (the old code NPE'd on null).
        StringBuilder preContent = new StringBuilder(StringUtils.defaultString((String) extras.get("sumContent")));

        // Image urls from centered paragraphs, comma separated.
        List<String> allImages = html.xpath("//div[@id=\"articleContent\"]//p[@align=\"center\"]//img/@src").all();
        StringBuilder sbImages = new StringBuilder();
        for (String s : allImages) {
            sbImages.append(s).append(",");
        }
        StringBuilder preImages = new StringBuilder(StringUtils.defaultString((String) extras.get("sumImages")));

        // Prefer the title captured on the list page; fall back to this page's <h1>.
        String title = StringUtils.isBlank((String) extras.get("title")) ? html.xpath("//h1/text()").get() : (String) extras.get("title");

        // Author: first non-blank text node under the article-info name div,
        // with HTML entities and newlines stripped.
        List<String> authors = html.xpath("//div[@class=\"article-info\"]/div[@class=\"name\"]//text()").all();
        List<String> allAuthor = new ArrayList<>();
        for (String s : authors) {
            if (StringUtils.isNotBlank(s.trim())) {
                String cleaned = s.replaceAll("&\\S*?;", "").replaceAll("\n", "").trim();
                if (StringUtils.isNotBlank(cleaned)) {
                    allAuthor.add(cleaned);
                }
            }
        }
        // Guard: the old code called allAuthor.get(0) unconditionally and threw on author-less pages.
        String author = allAuthor.isEmpty() ? "" : allAuthor.get(0);

        // Author id: prefer the inline `var EditorId = "...";` script variable,
        // falling back to parsing the author profile link.
        String authorId = "";
        Matcher mtAuthor = Pattern.compile("var\\s*EditorId\\s*=\\s*\"\\d*\";").matcher(httpPage.getRawText());
        while (mtAuthor.find()) {
            authorId = mtAuthor.group(0).split("\"")[1];
        }
        if (StringUtils.isBlank(authorId)) {
            try {
                authorId = html.xpath("//div[@class=\"article-info\"]/div/a/@href").get();
                authorId = authorId.split("_")[1].split("\\.")[0];
            } catch (Exception e) {
                // Bug fix: the old code blanked `author` here; a failed id lookup
                // should only clear the id, not discard the author name.
                authorId = "";
            }
        }

        String pubTime = html.xpath("//div[@class=\"article-info\"]/span[@class=\"time\"]/text()").get();
        if (pubTime.contains("\n")) {
            pubTime = pubTime.split("\n")[1].trim();
        }

        String source = html.xpath("//span[@class=\"source\"]/a/text()").get();

        // Topic labels shown under the article, if any.
        List<String> topicType = html.xpath("//div[@class=\"marks\"]/a/text()").all();
        if (null != topicType && topicType.size() > 0) {
            categoryTag.addKVTag(Tag_Field_Topic_Type, topicType);
        }

        // Car-series info blocks attached to the article.
        List<Map<String, String>> seriesList = new ArrayList<>();
        List<Selectable> seriesNodes = html.xpath("//div[@class=\"car-information\"]/div[@class=\"car-data\"] | //div[@class=\"car-information\"]/div[@class=\"car-skip\"]").nodes();
        if (null != seriesNodes && seriesNodes.size() > 0) {
            for (Selectable seriesNode : seriesNodes) {
                String seriesName = seriesNode.xpath("./h4/a/text() | ./div[@class=\"name\"]/a/text()").get();
                // "XX款 ..." names carry a model-year suffix; keep only the series part.
                if (seriesName.contains("款")) {
                    String[] seriesNameSplit = seriesName.split(" ");
                    if (seriesNameSplit.length > 0) {
                        seriesName = seriesNameSplit[0];
                    }
                }
                String seriesUrl = seriesNode.xpath("./h4/a/@href | ./div[@class=\"name\"]/a/@href").get();
                String[] split = seriesUrl.split("/");
                String seriesId = split[split.length - 2];
                seriesUrl = baseUrl + seriesId + "/";
                Map<String, String> seriesInfo = new HashMap<>();
                seriesInfo.put("series_name", seriesName);
                seriesInfo.put("series_url", seriesUrl);
                seriesInfo.put("series_id", seriesId);
                seriesList.add(seriesInfo);
            }
            categoryTag.addKVTag(Tag_Field_Series, seriesList);
        }

        try {
            String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
            CrawlerData crawlerArticleData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), (String) extras.get("articleKey")))
                    .url(lastRequest.getUrl())
                    .releaseTime(DateUtils.parseDate(pubTime, "yyyy年MM月dd日 HH:mm").getTime())
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Source, source)
                    .addContentKV(Field_Urls, StringUtils.isBlank((String) extras.get("listUrl")) ? "" : (String) extras.get("listUrl"))
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Content, preContent.append(sbContent).toString())
                    .addContentKV(Field_Images, preImages.append(sbImages).toString())
                    .resultLabelTag(article)
                    .build();
            crawlerArticleDataList.add(crawlerArticleData);
        } catch (ParseException e) {
            // Bug fix: SLF4J takes the message first and the throwable last;
            // the old call passed them reversed and lost the stack trace.
            logger.error("parse article pubTime [{}] failed", pubTime, e);
        }
        return crawlerArticleDataList;
    }

    /**
     * Washes interaction metrics.
     * - On an article page: views (captured on the list page) and comment count
     *   (filled in by afterInternalDownload) become one interaction record.
     * - On a comment-list JSON page: each comment's like count becomes an
     *   interaction record, date-range filtered downstream.
     */
    private List<CrawlerData> washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerInteractionDataList = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        Map<String, Object> extras = lastRequest.getExtras();
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        if (lastRequestUrl.matches(articleRegex)) {
            String articleKey = (String) extras.get("articleKey");
            String views = StringUtils.defaultString((String) extras.get("views"));
            if (views.contains("万")) {
                // Convert "1.5万" -> "15000". The old string splicing assumed exactly one
                // decimal digit and turned e.g. "1.25万" into "125000"; parse numerically.
                try {
                    views = String.valueOf(Math.round(Double.parseDouble(views.replace("万", "")) * 10000));
                } catch (NumberFormatException e) {
                    views = "";
                }
            }
            // defaultString: the old String.valueOf(null) stored the literal text "null".
            String comments = StringUtils.defaultString((String) extras.get("comments"));

            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), articleKey))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .url(String.format("https://www.autohome.com.cn/comment/Articlecomment.aspx?articleid=%s", articleKey))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_I_Comments, comments)
                    .addContentKV(Field_I_Views, views)
                    .resultLabelTag(interaction)
                    .build();
            crawlerInteractionDataList.add(crawlerData);
        }
        if (lastRequestUrl.matches(commentRegex)) {
            JSONObject jsonObject = JSONObject.parseObject(httpPage.getRawText());
            JSONArray commentList = jsonObject.getJSONArray("commentlist");
            // Robustness: a response without "commentlist" used to NPE in the loop.
            if (null != commentList) {
                for (Object o : commentList) {
                    JSONObject commentJson = (JSONObject) o;
                    String commentId = commentJson.getString("ReplyId");
                    String likes = commentJson.getString("RUp");
                    String pubTime = commentJson.getString("replydate");

                    try {
                        CrawlerData crawlerData = CrawlerData.builder()
                                .data(crawlerRequestRecord, httpPage)
                                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), commentId))
                                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                                .url(lastRequest.getUrl())
                                .releaseTime(cleanTime(pubTime))
                                .addContentKV(Field_I_Likes, likes)
                                .resultLabelTag(interaction)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                                .build();
                        crawlerData.setFilter(CrawlerEnum.CrawlerRecordFilter.dateRange);
                        crawlerInteractionDataList.add(crawlerData);
                    } catch (ParseException e) {
                        // Bug fix: message first, throwable last -- the old call reversed them.
                        logger.error("parse comment replydate [{}] failed", pubTime, e);
                    }
                }
            }
        }
        return crawlerInteractionDataList;
    }

    /**
     * Washes the comment-list JSON page into one comment {@link CrawlerData}
     * per entry (author, content, floor, publish time); records are
     * date-range filtered downstream.
     */
    private List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerCommentDataList = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = lastRequest.getExtras();
        JSONObject jsonObject = JSONObject.parseObject(httpPage.getRawText());
        JSONArray commentList = jsonObject.getJSONArray("commentlist");
        // Robustness: a response without "commentlist" used to NPE in the loop below.
        if (null == commentList) {
            return crawlerCommentDataList;
        }
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        for (Object o : commentList) {
            JSONObject commentJson = (JSONObject) o;
            String commentId = commentJson.getString("ReplyId");
            String pubTime = commentJson.getString("replydate");
            String author = commentJson.getString("RMemberName");
            String authorId = commentJson.getString("RMemberId");
            String content = commentJson.getString("RContent");
            String floor = commentJson.getString("RFloor");
            try {
                CrawlerData crawlerCommentData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), extras.get("articleKey")))
                        .url(String.format("https://www.autohome.com.cn/comment/Articlecomment.aspx?articleid=%s", extras.get("articleKey")))
                        .releaseTime(cleanTime(pubTime))
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Author_Id, authorId)
                        .addContentKV(Field_Content, content)
                        .addContentKV(Field_Floor, floor)
                        .resultLabelTag(comment)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .build();
                crawlerCommentData.setFilter(CrawlerEnum.CrawlerRecordFilter.dateRange);
                crawlerCommentDataList.add(crawlerCommentData);
            } catch (ParseException e) {
                // Bug fix: message first, throwable last -- the old call reversed them.
                logger.error("parse comment replydate [{}] failed", pubTime, e);
            }
        }
        return crawlerCommentDataList;
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // Intentionally a no-op: this script needs no per-context cleanup after execution.
    }

    /**
     * Parses the site's heterogeneous time strings into epoch milliseconds:
     * "刚刚" (just now), relative "...前" forms (delegated to timeBefore), or an
     * absolute "yyyy-MM-dd HH:mm:ss" timestamp. Returns 0 for anything else.
     *
     * @param timeStr raw time string, possibly prefixed like "发布于：..."
     * @throws ParseException if an absolute "-"-containing string is malformed
     */
    private long cleanTime(String timeStr) throws ParseException {
        if (timeStr.contains("发布")) {
            // Keep only the part after the full-width colon, e.g. "发布于：2020-11-19 17:54".
            String[] parts = timeStr.split("：");
            // Guard: the old code indexed [1] unconditionally and threw when no colon followed "发布".
            if (parts.length > 1) {
                timeStr = parts[1];
            }
        }
        if (timeStr.contains("刚刚")) {
            return System.currentTimeMillis();
        } else if (timeStr.contains("前")) {
            return timeBefore(timeStr);
        } else if (timeStr.contains("-")) {
            return DateUtils.parseDate(timeStr, "yyyy-MM-dd HH:mm:ss").getTime();
        } else {
            return 0;
        }
    }

    /**
     * Converts a relative Chinese time string ("N秒前", "N分钟前", "N小时N分钟前",
     * "N天前", "N周前", "N个月前", a month approximated as 31 days) into epoch
     * milliseconds relative to now. Returns 0 for unrecognized formats.
     *
     * NOTE(review): assumes ONE_SECOND/ONE_MINUTE/ONE_HOUR/ONE_DAY are
     * millisecond constants defined in AutoVMCommonField -- the long parsing
     * below also protects against int overflow if they are declared as int.
     */
    private long timeBefore(String timeStr) {
        // \d+ (was \d*): a degenerate "天前" no longer crashes on parseInt("").
        if (timeStr.matches("\\d+天前")) {
            long days = Long.parseLong(timeStr.split("天")[0]);
            return System.currentTimeMillis() - days * ONE_DAY;

        } else if (timeStr.matches("\\d+秒前")) {
            long seconds = Long.parseLong(timeStr.split("秒")[0]);
            return System.currentTimeMillis() - seconds * ONE_SECOND;

        } else if (timeStr.matches("\\d+分钟前")) {
            long minutes = Long.parseLong(timeStr.split("分钟")[0]);
            return System.currentTimeMillis() - minutes * ONE_MINUTE;

        } else if (timeStr.matches("\\d+分钟\\d+秒前")) {
            String[] split = timeStr.split("分钟");
            long minutes = Long.parseLong(split[0]);
            long seconds = Long.parseLong(split[1].split("秒")[0]);
            return System.currentTimeMillis() - (minutes * ONE_MINUTE + seconds * ONE_SECOND);

        } else if (timeStr.matches("\\d+小时前")) {
            long hours = Long.parseLong(timeStr.split("小时")[0]);
            return System.currentTimeMillis() - hours * ONE_HOUR;

        } else if (timeStr.matches("\\d+小时\\d+分钟前")) {
            String[] split = timeStr.split("小时");
            long hours = Long.parseLong(split[0]);
            long minutes = Long.parseLong(split[1].split("分钟")[0]);
            return System.currentTimeMillis() - (hours * ONE_HOUR + minutes * ONE_MINUTE);

        } else if (timeStr.matches("\\d+周前")) {
            long weeks = Long.parseLong(timeStr.split("周")[0]);
            return System.currentTimeMillis() - weeks * 7 * ONE_DAY;

        } else if (timeStr.matches("\\d+个月前")) {
            long months = Long.parseLong(timeStr.split("个")[0]);
            return System.currentTimeMillis() - months * 31 * ONE_DAY;
        } else {
            // Unrecognized relative format.
            return 0;
        }
    }

    /**
     * Returns a shallow copy of the given extras map; the caller may mutate the
     * copy without affecting the source request's extras (values are shared).
     *
     * @param inExtras source map, must not be null
     * @return a new mutable HashMap containing the same entries
     */
    public static Map<String, Object> copyExtras(Map<String, Object> inExtras) {
        // HashMap's copy constructor replaces the old manual entry-by-entry loop.
        return new HashMap<>(inExtras);
    }


    private static List<String> agentList = new ArrayList<>();

    static {
        agentList.add("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko");
        agentList.add("Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)");
        agentList.add("Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.28.3 (KHTML, like Gecko) Version/3.2.3 ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/525.28.3");
        agentList.add("Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16");
        agentList.add("Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14");
        agentList.add("Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14");
        agentList.add("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14");
        agentList.add("Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02");
        agentList.add("Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0");
        agentList.add("Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.13 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400");
    }

    /**
     * Picks a random User-Agent string from the pool.
     * Bug fix: RandomUtils.nextInt's upper bound is exclusive, so the old
     * "agentList.size() - 1" bound could never select the last entry.
     */
    private static String getRandomUA() {
        return agentList.get(RandomUtils.nextInt(0, agentList.size()));
    }
}
