package com.chance.cc.crawler.development.scripts.pcauto.article;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.*;

/**
 * @author lt
 * @version 1.0
 * @date 2021-01-11 13:56:33
 * @email okprog@sina.com
 */
public class PcAutoArticleCrawlerScript extends CrawlerCommonScript {

    // SLF4J loggers are thread-safe and should be static final (fix: was a non-static instance field).
    private static final Logger logger = LoggerFactory.getLogger(PcAutoArticleCrawlerScript.class);

    // URL patterns for the pcauto.com.cn page types this script handles.
    private static final String indexRegex = "https?://www\\.pcauto\\.com\\.cn/sitemap/sitemap\\.html";
    private static final String listRegex = "https?://www\\.pcauto\\.com\\.cn/\\w*/\\S*/";
    private static final String pagingRegex = "https?://www\\.pcauto\\.com\\.cn/\\w*/\\S*/index_\\d*\\.html";
    private static final String articleRegex = "https?://www\\.pcauto\\.com\\.cn/\\w*/\\d*/\\d*\\.html";
    private static final String allPageUrlRegex = "https?://www\\.pcauto\\.com\\.cn/\\w*/\\d*/\\d*_all\\.html\\S*";
    private static final String cmtCountUrlRegex = "https?://cmt\\.pcauto\\.com\\.cn/action/topic/get_data\\.jsp\\?url=\\S*";
    private static final String commentRegex = "https?://cmt\\.pcauto\\.com\\.cn/action/comment/list_good_json\\.jsp\\S*";
    private static final String authorInfoUrlRegex = "https://bip\\.pcauto\\.com\\.cn/intf/focus\\.jsp\\?act=getFocusByNum&accountId=\\d*";

    // Format templates for derived request URLs (author info, comment count, "good" comment list).
    public static final String authorInfoUrlFormat = "https://bip.pcauto.com.cn/intf/focus.jsp?act=getFocusByNum&accountId=%s";
    public static final String cmtCountUrlFormat = "https://cmt.pcauto.com.cn/action/topic/get_data.jsp?url=%s";
    public static final String commentUrlFormat = "https://cmt.pcauto.com.cn/action/comment/list_good_json.jsp?encodeHtml=1&urlHandle=1&url=%s&pageNo=%s&pageSize=30&voteId=0";

    // "New comment" list endpoint: match pattern and the URL template used to build page requests.
    // (Fix: previously named COMMENT / Comment, differing only by case.)
    private static final String newCommentListRegex = "https://cmt.pcauto.com.cn/action/comment/list_new_comment.jsp\\S*";
    private static final String newCommentUrlFormat = "https://cmt.pcauto.com.cn/action/comment/list_new_comment.jsp?encodeHtml=1&urlHandle=1&url=%s&pageNo=%s&pageSize=15&voteId=0";

    // This script only handles records whose "site" category tag equals this value.
    private static final String scriptSite = "article";


    @Override
    public String domain() {
        return "pcauto";
    }

    /**
     * Registers every URL pattern this script is willing to crawl.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(newCommentListRegex);
        addUrlRegular(indexRegex);
        addUrlRegular(listRegex);
        addUrlRegular(pagingRegex);
        addUrlRegular(articleRegex);
        addUrlRegular(commentRegex);
    }

    /**
     * Accepts a request only when its "site" category tag matches {@link #scriptSite}.
     * Comparison is flipped onto the constant so a missing/null tag value yields
     * {@code false} instead of an NPE.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /**
     * When the record carries a domain-result JSON tag, unpacks the embedded article URL
     * and category path, rewrites the request to target that article, and removes the tag.
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        HttpRequest httpRequest = requestRecord.getHttpRequest();
        if(requestRecord.tagsCreator().bizTags().hasKVTag(Tag_Field_Domain_Result_Json)){
            KVTag domainResultJson = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(Tag_Field_Domain_Result_Json);
            CrawlerDomainUrls crawlerDomainUrls = JSON.parseObject(String.valueOf(domainResultJson.getVal()),CrawlerDomainUrls.class);
            // The "url" field of the domain result is itself a JSON object holding the
            // real article URL plus its two-level category.
            String url = crawlerDomainUrls.getUrl();
            JSONObject contentObj = JSONObject.parseObject(url);

            String articleUrl = contentObj.getString("url");
            String type1st = contentObj.getString("type1st");
            String type2nd = contentObj.getString("type2nd");
            List<String> tags = new ArrayList<>();
            tags.add(type1st);
            tags.add(type2nd);
            requestRecord.tagsCreator().bizTags().addCustomKV(Field_Path,tags);
            requestRecord.setReleaseTime(System.currentTimeMillis());
            requestRecord.setNeedParsedPage(true);
            requestRecord.setNeedWashPage(false);
            httpRequest.setUrl(articleUrl);
            // Remove the tag so the record is not re-expanded on a later pass.
            requestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(Tag_Field_Domain_Result_Json);
        }
        return super.prepareRequest(requestRecord, supportSourceRecords);
    }

    /**
     * Dispatches the downloaded page to the matching link parser (list / paging,
     * article, "good" comment JSON, or "new" comment JSON).
     * Download failures (other than a clean 404) are re-queued for retry.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        int statusCode = httpPage.getStatusCode();
        String lastRequestUrl = lastRequest.getUrl();
        // Retry: empty body, failed download, or an unexpected status — re-queue the record itself.
        if (StringUtils.isBlank(httpPage.getRawText()) || !httpPage.isDownloadSuccess() || (statusCode != 200 && statusCode != 404)){
            parsedLinks.add(crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            return parsedLinks;
        }
        if (statusCode == 404){
            logger.error("页面不存在：" + statusCode);
            return parsedLinks;
        }
        if (lastRequestUrl.matches(listRegex) || lastRequestUrl.matches(pagingRegex)){
            return parseListLinks(crawlerRequestRecord, httpPage, parsedLinks, lastRequestUrl);
        }
        if (lastRequestUrl.matches(articleRegex)){
            return parseArticleLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(commentRegex)){
            return parseCommentLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(newCommentListRegex)){
            return  getCommentTurn(crawlerRequestRecord,httpPage,parsedLinks);
        }
        // Fix: was "return null" — return the empty list so callers never see a null collection.
        return parsedLinks;
    }

    /**
     * Turns the "new comment" JSON page: if more pages remain (per the pageCount field),
     * emits a request for the next page carrying the pageNo/url biz tags forward.
     */
    private List<CrawlerRequestRecord> getCommentTurn(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String availablePageNo = httpPage.getJson().jsonPath($_type + ".pageCount").get();
        int sum = Integer.parseInt(availablePageNo);
        KVTag pageNo = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("pageNo");
        int val = ((Integer) pageNo.getVal()) + 1;
        if (sum < val){
            // Already at the last page — nothing more to fetch.
            return parsedLinks;
        }
        String url = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("url");
        String commentUrl = String.format(newCommentUrlFormat,url,val);
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .recordKey(commentUrl + "comment")
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .needWashed(true)
                .needParsed(true)
                .notFilterRecord()
                .resultLabelTag(comment)
                .build();
        commentRecord.tagsCreator().bizTags().addCustomKV("pageNo",val);
        commentRecord.tagsCreator().bizTags().addCustomKV("url",url);
        parsedLinks.add(commentRecord);
        return parsedLinks;
    }

    /**
     * Turns the "good comment" JSON page: emits a request for the next page while
     * pageNo &lt; pageCount, propagating the request extras (articleUrl etc.).
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        int pageCount = pageObj.getIntValue("pageCount");
        int pageNo = pageObj.getIntValue("pageNo");
        if (pageNo < pageCount){
            int nextPageNo = pageNo + 1;
            String articleUrl = (String) extras.get("articleUrl");
            String commentUrl = String.format(commentUrlFormat,articleUrl,nextPageNo);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .recordKey(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .needWashed(true)
                    .needParsed(true)
                    .resultLabelTag(comment)
                    .resultLabelTag(interaction)
                    .build();
            Map<String, Object> cmtExtras = copyExtras(extras);
            commentRecord.getHttpRequest().setExtras(cmtExtras);
            parsedLinks.add(commentRecord);
        }
        return parsedLinks;
    }

    /**
     * From an article page, spawns the internal-download requests ("all pages" view,
     * author info, comment count) and the first "new comment" page request.
     * Also parses the article publish time into the record's release time.
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        crawlerRequestRecord.setInternalDownloadPage(httpPage);
        Map<String, Object> extras = new HashMap<>();
        crawlerRequestRecord.getHttpRequest().setExtras(extras);
        // If the article is split across pages, internally download the single-page ("_all") view.
        String allPageUrl = httpPage.getHtml().xpath("//div[@id=\"JallpageTop\"]/a/@href").get();
        if (StringUtils.isNotBlank(allPageUrl)){
            allPageUrl = "https:" +  allPageUrl.split("\\?")[0];
            CrawlerRequestRecord allPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(allPageUrl)
                    .recordKey(allPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            parsedLinks.add(allPageRecord);
        }
        // Internally download the author-info page.
        String pubTime =httpPage.getHtml().xpath("//span[@id=\"pubtime_baidu\"]/text()").get();
        try {
            crawlerRequestRecord.setReleaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime());
        } catch (ParseException e) {
            logger.error("parse date error",e);
        }
        String uid = httpPage.getHtml().xpath("//a[@id=\"addFocus\"]/@data-uid").get();
        if (StringUtils.isNotBlank(uid)){
            String authorInfoUrl = String.format(authorInfoUrlFormat,uid);
            CrawlerRequestRecord authorInfoRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(authorInfoUrl)
                    .recordKey(authorInfoUrl)
                    .releaseTime(System.currentTimeMillis())
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            parsedLinks.add(authorInfoRecord);
        }

        // Internally download the comment count (the endpoint takes the article URL without its scheme).
        String cmtCountUrl = String.format(cmtCountUrlFormat,crawlerRequestRecord.getHttpRequest().getUrl().split(":")[1]);
        CrawlerRequestRecord cmtCountRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(cmtCountUrl)
                .recordKey(cmtCountUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        parsedLinks.add(cmtCountRecord);

        // Comment paging: seed the first "new comment" page for this article.
        String commentUrl = String.format(newCommentUrlFormat,crawlerRequestRecord.getHttpRequest().getUrl(),1);
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .recordKey(commentUrl + "comment")
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .needParsed(true)
                .notFilterRecord()
                .resultLabelTag(comment)
                .build();
        commentRecord.tagsCreator().bizTags().addCustomKV("pageNo",1);
        commentRecord.tagsCreator().bizTags().addCustomKV("url",crawlerRequestRecord.getHttpRequest().getUrl());
        Map<String, Object> cmtExtras = copyExtras(extras);
        cmtExtras.put("articleUrl",crawlerRequestRecord.getHttpRequest().getUrl());
        commentRecord.getHttpRequest().setExtras(cmtExtras);
        parsedLinks.add(commentRecord);
        return parsedLinks;
    }

    /**
     * From a list/paging page, emits the next-page request (except for "youji" travel
     * sections, which use a different layout) plus one item request per article entry.
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, String lastRequestUrl) {
        // Next page: a bare list URL starts at index_1; a paging URL increments its index.
        String nextPageUrl = "";
        if (lastRequestUrl.matches(listRegex)){
            nextPageUrl = lastRequestUrl + "index_1.html";
        }
        if (lastRequestUrl.matches(pagingRegex)){
            int currentPage = Integer.parseInt(lastRequestUrl.split("_")[1].split("\\.")[0]);
            nextPageUrl = lastRequestUrl.split("_")[0] + "_" + (currentPage + 1) + ".html";
        }
        if (StringUtils.isNotBlank(nextPageUrl) && !lastRequestUrl.contains("youji")){
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(nextPageRecord);
        }
        List<Selectable> normalNodes = httpPage.getHtml().xpath("//div[@class=\"box-bd\"]/div").nodes();
        if (null != normalNodes && normalNodes.size() > 0){
            for (Selectable normalNode : normalNodes) {
                String itemUrl = "https:" + normalNode.xpath("./a/@href").get();
                // Video items live on a different site and are out of scope here.
                if (itemUrl.contains("pcvideo")){
                    continue;
                }
                String pubTime = normalNode.xpath("./div/p[@class=\"other\"]/span[@class=\"time\"]/text()").get();

                try {
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(itemUrl)
                            .recordKey(itemUrl)
                            .releaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd").getTime())
                            .copyBizTags()
                            .resultLabelTag(article)
                            .resultLabelTag(interaction)
                            .build();
                    parsedLinks.add(itemRecord);
                }catch (Exception e){
                    logger.error("parse date error", e);
                }
            }
        }else {
            // Travel-note ("ulPic") layout has no per-item publish time; use "now".
            List<Selectable> travelNodes = httpPage.getHtml().xpath("//ul[@class=\"ulPic\"]/li").nodes();
            for (Selectable travelNode : travelNodes) {
                String itemUrl = "https:" + travelNode.xpath("./a/@href").get();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build();
                parsedLinks.add(itemRecord);
            }
        }
        return parsedLinks;
    }

    /**
     * After internal downloads complete for an article: swap in the "_all" single-page
     * HTML, and stash the author follow count and comment total into the request extras.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String lastRequestUrl = httpRequest.getUrl();
        if (lastRequestUrl.matches(articleRegex)){
            for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
                String internalUrl = internalDownloadRecord.getHttpRequest().getUrl();
                if (internalUrl.matches(allPageUrlRegex)){
                    HttpPage internalDownloadPage = internalDownloadRecord.getInternalDownloadPage();
                    crawlerRequestRecord.setInternalDownloadPage(internalDownloadPage);
                }
                if (internalUrl.matches(authorInfoUrlRegex)){
                    // The author-info endpoint returns the follow count as plain text.
                    String follows = internalDownloadRecord.getInternalDownloadPage().getRawText().trim();
                    extras.put("follows",follows);
                }
                if (internalUrl.matches(cmtCountUrlRegex)){
                    HttpPage internalDownloadPage = internalDownloadRecord.getInternalDownloadPage();
                    JSONObject pageObj = JSONObject.parseObject(internalDownloadPage.getRawText());
                    String comments = pageObj.getString("total");
                    extras.put("comments",comments);
                }
            }
        }
    }

    /**
     * Routes the downloaded page to the wash routine matching its result data type.
     * NOTE(review): comment pages are routed by URL pattern rather than by the
     * comment result tag — presumably because the "new comment" records created in
    * {@code parseArticleLinks} are notFilterRecord; confirm before changing.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String url = crawlerRequestRecord.getHttpRequest().getUrl();
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)){
            crawlerDataList.addAll(washArticle(crawlerRequestRecord, httpPage));
        }

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
            crawlerDataList.addAll(washInteraction(crawlerRequestRecord,httpPage));
        }

        if (url.matches(newCommentListRegex)){
            washCommentTurn(crawlerRequestRecord,httpPage,crawlerDataList);
        }

        return crawlerDataList;
    }

    /**
     * Extracts comment records from a "new comment" JSON page, skipping entries whose
     * creation time falls outside the record's configured date range.
     */
    private void washCommentTurn(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerData> crawlerDataList) {
        List<String> all = httpPage.getJson().jsonPath($_type + ".data").all();
        for (String str : all){
            JSONObject jsonObject = JSONObject.parseObject(str);
            String createTime = jsonObject.getString("createTime");
            long time = 0L;
            try {
                time = DateUtils.parseDate(createTime, "yyyy-MM-dd HH:mm:ss").getTime();
            } catch (ParseException e) {
                // Fix: was e.printStackTrace(); an unparseable time leaves time=0,
                // which isDateRange then rejects when a date filter is active.
                logger.error("parse comment createTime error", e);
            }
            if (!isDateRange(crawlerRequestRecord,time)){
                continue;
            }
            String id = jsonObject.getString("id");
            String nickName = jsonObject.getString("nickName");
            String userId = jsonObject.getString("userId");
            String content = jsonObject.getString("content");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .dataId(StringUtils.joinWith("-",crawlerRequestRecord.getDomain(), comment,id))
                    .url(crawlerRequestRecord.getHttpRequest().getUrl())
                    .releaseTime(time)
                    .resultLabelTag(comment)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .addContentKV(Field_Author,nickName)
                    .addContentKV(Field_Author_Id,userId)
                    .addContentKV(Field_Content,content)
                    .build();
            crawlerData.tagsCreator().bizTags().addSiteBiz("news");
            crawlerDataList.add(crawlerData);
        }
    }

    /**
     * Checks whether a release time falls inside the record's date-range filter.
     * Records without a date-range filter always pass.
     *
     * @param crawlerRequestRecord record carrying the filter configuration
     * @param releaseTimeToLong epoch millis of the item being filtered (0 = unknown, always rejected when a range applies)
     * @return true when the time is in range or no date filter applies
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord,Long releaseTimeToLong){
        boolean isRange = false;
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        if (filter == CrawlerEnum.CrawlerRecordFilter.keyOrDateRange || filter == CrawlerEnum.CrawlerRecordFilter.dateRange) {
            List<FilterInfo> filterInfos = crawlerRequestRecord.getFilterInfos();
            Long startTime = null;
            Long endTime = null;
            for (FilterInfo filterInfo : filterInfos) {
                if (filterInfo.getFilter() == CrawlerEnum.CrawlerRecordFilter.dateRange) {
                    long[] dateAllowRange = filterInfo.getDateAllowRange();
                    int hourFromNow = filterInfo.getHourFromNow();
                    if (dateAllowRange != null) {
                        startTime = dateAllowRange[0];
                        endTime = dateAllowRange[1];
                    }else if(hourFromNow != 0){
                        endTime = System.currentTimeMillis()-60000;// system time minus one minute
                        startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
                    }
                }
            }
            if(startTime != null && releaseTimeToLong != 0 && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime){
                isRange = true;
            }
        }else{
            isRange = true;
        }
        return isRange;
    }

    /**
     * Builds the article CrawlerData from the (possibly "_all") article HTML:
     * title, body text, image URLs, author (three fallback XPaths), follow count,
     * topic tags, and car-series tags.
     */
    private List<CrawlerData> washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = lastRequest.getExtras();
        CrawlerBusinessTags bizTags = crawlerRequestRecord.tagsCreator().bizTags();
        CategoryTag categoryTag = bizTags.getCategoryTag();
        Html html = crawlerRequestRecord.getInternalDownloadPage().getHtml();

        // Article key = numeric file name of the URL (".../12345.html" -> "12345").
        String articleKey = lastRequest.getUrl().substring(lastRequest.getUrl().lastIndexOf("/") + 1).split("\\.")[0];
        String title = html.xpath("//h1[@class=\"tit\"]/text()").get();

        List<String> allContents = html.xpath("//div[@class=\"artText clearfix\"]//p//text()").all();
        StringBuilder sbContent = new StringBuilder();
        for (String s : allContents) {
            sbContent.append(s).append(" ");
        }
        // Join the lazily-loaded image URLs (src1 attribute) into a comma-separated list.
        List<String> allImages = html.xpath("//div[@class=\"artText clearfix\"]//p//img//@src1").all();
        StringBuilder sbImages = new StringBuilder();
        for (String s : allImages) {
            sbImages.append(s).append(",");
        }

        // Author: byline span, then editor link, then editor text ("责任编辑：xxx" style).
        String author = html.xpath("//p[@class=\"name\"]/a/span[1]/text()").get();
        if (StringUtils.isBlank(author)){
            author = html.xpath("//span[@class='editor']/a/text()").get();
            if (StringUtils.isBlank(author)){
                author = html.xpath("//span[@class='editor']/text()").get();
                if (author != null && author.contains("：")){
                    author = author.split("：")[1];
                }
            }
        }
        // Fix: guard against all three XPaths missing (previously NPEd on trim()).
        author = author == null ? "" : author.trim();
        String follows = (String) extras.get("follows");
        if (StringUtils.isBlank(follows)){
            follows = "";
        }

        String authorId = html.xpath("//a[@id=\"addFocus\"]/@data-uid").get();
        if (StringUtils.isBlank(authorId)){
            authorId = "";
        }

        String pubTime = html.xpath("//span[@id=\"pubtime_baidu\"]/text()").get();

        List<String> topicType = html.xpath("//p[@class=\"moreRead artTag\"]/a/text()").all();
        if (null != topicType && topicType.size() > 0){
            categoryTag.addKVTag(Tag_Field_Topic_Type,topicType);
        }

        // Attach car-series tags referenced by the article.
        List<Map<String,String>> seriesList = new ArrayList<>();
        List<Selectable> seriesNodes = html.xpath("//div[@id=\"JserialFull\"]/div/div[@class=\"serial-item active\"]").nodes();
        if (null != seriesNodes && seriesNodes.size() > 0){
            for (Selectable seriesNode : seriesNodes) {
                String seriesName = seriesNode.xpath("./div/div/div[@class='serial-content']/div[@class='serial-m']/div[@class='serial-info']/a[@class='serial-name']/text()").get();

                String seriesUrl = seriesNode.xpath("./div/div/div[@class='serial-content']/div[@class='serial-m']/div[@class='serial-info']/a[@class='serial-name']/@href").get();
                seriesUrl = seriesUrl.split("#")[0];
                // Series id is the first path segment after the domain.
                String seriesId = seriesUrl.split("cn/")[1].split("/")[0];

                Map<String, String> seriesInfo = new HashMap<>();
                seriesInfo.put("series_name",seriesName);
                seriesInfo.put("series_url",seriesUrl);
                seriesInfo.put("series_id",seriesId);
                seriesList.add(seriesInfo);
            }
            categoryTag.addKVTag(Tag_Field_Series,seriesList);
        }

        try {
            String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
            CrawlerData crawlerArticleData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .url(lastRequest.getUrl())
                    .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd HH:mm:ss").getTime())
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author_Follows, follows)
                    .addContentKV(Field_Content, sbContent.toString())
                    .addContentKV(Field_Images, sbImages.toString())
                    .resultLabelTag(article)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .build();
            crawlerArticleData.tagsCreator().bizTags().addSiteBiz("news");
            crawlerArticleDataList.add(crawlerArticleData);
        } catch (ParseException e) {
            // Fix: arguments were swapped (exception passed as the message), losing the stack trace.
            logger.error("parse date error", e);
        }
        return crawlerArticleDataList;
    }

    /**
     * Builds interaction CrawlerData: the comment total for an article page, or
     * per-comment like counts for a "good comment" JSON page.
     */
    private List<CrawlerData> washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerInteractionDataList = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        Map<String, Object> extras = lastRequest.getExtras();
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String articleKey = lastRequest.getUrl().substring(lastRequest.getUrl().lastIndexOf("/") + 1).split("\\.")[0];

        if (lastRequestUrl.matches(articleRegex)){
            String comments = (String)extras.get("comments");
            String pubTime = crawlerRequestRecord.getInternalDownloadPage().getHtml().xpath("//span[@id=\"pubtime_baidu\"]/text()").get();
            try {
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), articleKey))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                        .url(lastRequestUrl)
                        .releaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime())
                        .addContentKV(Field_I_Comments,String.valueOf(comments))
                        .resultLabelTag(interaction)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .build();
                crawlerData.tagsCreator().bizTags().addSiteBiz("news");
                crawlerInteractionDataList.add(crawlerData);
            }catch (Exception e){
                logger.error(e.getMessage(),e);
            }
        }
        if (lastRequestUrl.matches(commentRegex)){
            JSONObject jsonObject = JSONObject.parseObject(httpPage.getRawText());
            JSONArray commentList = jsonObject.getJSONArray("data");
            for (Object o : commentList) {
                JSONObject commentJson = (JSONObject)o;
                String commentId = commentJson.getString("id");
                String likes = commentJson.getString("support");
                String pubTime = commentJson.getString("createTime");

                try {
                    CrawlerData crawlerData = CrawlerData.builder()
                            .data(crawlerRequestRecord, httpPage)
                            .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), commentId))
                            .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                            .url(lastRequest.getUrl())
                            .releaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime())
                            .addContentKV(Field_I_Likes,likes)
                            .resultLabelTag(interaction)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .build();
                    crawlerData.tagsCreator().bizTags().addSiteBiz("news");
                    crawlerData.setFilter(CrawlerEnum.CrawlerRecordFilter.keyOrDateRange);
                    crawlerInteractionDataList.add(crawlerData);
                } catch (ParseException e) {
                    // Fix: arguments were swapped (exception passed as the message), losing the stack trace.
                    logger.error("parse date error", e);
                }
            }
        }
        return crawlerInteractionDataList;
    }

    /**
     * Builds comment CrawlerData from a "good comment" JSON page.
     * NOTE(review): currently unreferenced — washPage routes comments through
     * washCommentTurn instead; kept for possible re-enablement.
     */
    private List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerCommentDataList = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = lastRequest.getExtras();
        JSONObject jsonObject = JSONObject.parseObject(httpPage.getRawText());
        JSONArray commentList = jsonObject.getJSONArray("data");
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String articleUrl = (String) extras.get("articleUrl");
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1).split("\\.")[0];
        for (Object o : commentList) {
            JSONObject commentJson = (JSONObject)o;
            String commentId = commentJson.getString("id");
            String pubTime = commentJson.getString("createTime");
            String author = commentJson.getString("nickName");
            String authorId = commentJson.getString("userId");
            String content = commentJson.getString("content");
            String floor = commentJson.getString("floor");
            try {
                CrawlerData crawlerCommentData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                        .url(articleUrl)
                        .releaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime())
                        .addContentKV(Field_Author,author)
                        .addContentKV(Field_Author_Id,authorId)
                        .addContentKV(Field_Content,content)
                        .addContentKV(Field_Floor,floor)
                        .resultLabelTag(comment)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .build();
                crawlerCommentData.tagsCreator().bizTags().addSiteBiz("news");
                crawlerCommentData.setFilter(CrawlerEnum.CrawlerRecordFilter.keyOrDateRange);
                crawlerCommentDataList.add(crawlerCommentData);
            } catch (ParseException e) {
                // Fix: arguments were swapped (exception passed as the message), losing the stack trace.
                logger.error("parse date error", e);
            }
        }
        return crawlerCommentDataList;
    }

    /**
     * No post-execution work needed for this script.
     */
    @Override
    public void afterExecute(CrawlerRecordContext context) {

    }

    /**
     * Returns a shallow copy of the given extras map.
     */
    public static Map<String, Object> copyExtras(Map<String,Object> inExtras){
        // HashMap's copy constructor performs the same shallow entry copy as the old manual loop.
        return new HashMap<>(inExtras);
    }
}
