package com.chance.cc.crawler.development.scripts.pcauto.hj;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.pipeline.result.CrawlerDataBuilder;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedReader;
import java.io.FileReader;
import java.math.BigDecimal;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.*;
import static org.apache.commons.lang3.time.DateUtils.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/3/3 18:14
 * @Description 太平洋号 板块
 **/
public class PcautoHjCrawlerScript extends CrawlerCommonScript {

    /** SLF4J logger; made static final so one instance is shared per class (convention). */
    private static final Logger log = LoggerFactory.getLogger(PcautoHjCrawlerScript.class);
    /** Crawler domain identifier returned by {@link #domain()}. */
    private static final String DOMAIN = "pcauto";
    /** Site tag value that routes requests into this script (see crawlerCheck). */
    private static final String SITE = "hj";
    /** Biz-tag key that counts retries of a failed download. */
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";
    /** Biz-tag key listing the module ids to crawl from the entrance request. */
    private static final String CRAWLER_MODULE_LIST = "crawlerModule";
    /** Result-tag key carrying the comment filter configuration record. */
    private static final String COMMENT_FILTER_INFO = "comment_filter_info";
    /** Request-extras key pointing to a local file of detail-page urls. */
    private static final String FILE_PATH = "filePath";

    /** Site entrance (also the exact-match pattern for prepareRequest). */
    private static final String ENTRANCE_URL = "https://hj.pcauto.com.cn/";
    /** Module list API template: %s = module id. */
    private static final String MODULE_SOURCE_URL = "https://hj.pcauto.com.cn/api/articles/tag/%s?pNo=1&pSize=10&sortKey=0";
    /** Regex matching module list API urls of any page. */
    private static final String MODULE_URL = "https://hj.pcauto.com.cn/api/articles/tag/\\d+\\?pNo=\\d+&pSize=\\d+&sortKey=0";
    /** Video detail page template: %s = article id. */
    private static final String ITEM_VIDEO_SOURCE_URL = "https://hj.pcauto.com.cn/video/%s";
    /** Article detail page template: %s = article id. */
    private static final String ITEM_ARTICLE_SOURCE_URL = "https://hj.pcauto.com.cn/article/%s";
    /** Regex matching video detail pages. */
    private static final String ITEM_VIDEO_URL = "https://hj.pcauto.com.cn/video/\\d+";
    /** Regex matching article detail pages. */
    private static final String ITEM_ARTICLE_URL = "https://hj.pcauto.com.cn/article/\\d+";
    /** Comment list API template: %s = "<item url>.html". */
    private static final String COMMENT_SOURCE_URL = "https://cmt.pcauto.com.cn/action/comment/list_new_json.jsp?urlHandle=1&url=%s&pageSize=10&pageNo=1";
    /** Regex matching comment list API urls. */
    private static final String COMMENT_URL = "https://cmt.pcauto.com.cn/action/comment/list_new_json.jsp\\S*";

    /**
     * Crawler domain this script is registered under.
     *
     * @return the constant domain identifier "pcauto"
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers every url pattern this script accepts: the site entrance, the
     * module list API, article/video detail pages, and the comment list API.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {ENTRANCE_URL, MODULE_URL, ITEM_ARTICLE_URL, ITEM_VIDEO_URL, COMMENT_URL};
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input gate: only requests whose "site" category tag equals {@code SITE}
     * ("hj") are processed by this script.
     *
     * @param crawlerRequestRecord incoming request to check
     * @return true when the request belongs to this site
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String siteTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        return StringUtils.equals(SITE, siteTag);
    }

    /**
     * Builds the initial crawl requests from the entrance url. Two sources are
     * supported:
     * <ol>
     *   <li>a "crawlerModule" biz tag listing module ids — each id becomes a
     *       module list-page request carrying the module's display name as its
     *       crawl path;</li>
     *   <li>a "filePath" entry in the request extras — each non-blank line of
     *       that file is treated as an article/video detail url to crawl
     *       directly.</li>
     * </ol>
     *
     * @param requestRecord        the entrance request
     * @param supportSourceRecords unused by this script
     * @return generated requests (empty when the url is not the entrance)
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> records = new ArrayList<>();

        String requestUrl = requestRecord.getHttpRequest().getUrl();
        if (requestUrl.matches(ENTRANCE_URL)) {
            if ((requestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().get(CRAWLER_MODULE_LIST)) != null) {
                List<String> crawlerModule = (List<String>) requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(CRAWLER_MODULE_LIST).getVal();
                // Remove the tag so it is not copied onto every child request.
                requestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(CRAWLER_MODULE_LIST);
                for (String module : crawlerModule) {
                    String url = String.format(MODULE_SOURCE_URL, module);
                    CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                            .turnPageRequest(requestRecord)
                            .httpUrl(url)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .copyBizTags()
                            .copyResultTags()
                            .build();
                    List<String> modulePath = new ArrayList<>();
                    modulePath.add(moduleNameOf(module));
                    record.tagsCreator().bizTags().addCustomKV(Field_Path, modulePath);
                    records.add(record);
                }
            }

            // Crawl individual articles/videos listed in a local file, one url per line.
            if (requestRecord.getHttpRequest().getExtras() != null) {
                String filePath = (String) requestRecord.getHttpRequest().getExtras().get(FILE_PATH);
                if (StringUtils.isNotBlank(filePath)) {
                    // try-with-resources: the previous version leaked the reader.
                    try (BufferedReader bufferedReader = new BufferedReader(new FileReader(filePath))) {
                        String url;
                        while ((url = bufferedReader.readLine()) != null) {
                            if (StringUtils.isBlank(url)) {
                                continue;
                            }
                            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                                    .itemPageRequest(requestRecord)
                                    .httpUrl(url)
                                    .releaseTime(System.currentTimeMillis())
                                    .notFilterRecord()
                                    .copyBizTags()
                                    .copyResultTags()
                                    .build();
                            records.add(itemRecord);
                        }
                    } catch (Exception e) {
                        log.error("read url file [{}] error", filePath, e);
                    }
                }
            }
        }

        return records;
    }

    /** Maps a module id to its display name used as the crawl path; unknown ids map to "". */
    private static String moduleNameOf(String module) {
        switch (module) {
            case "92":
                return "评测体验";
            case "1":
                return "海选导购";
            case "2":
                return "用车技术";
            case "91":
                return "新车资讯";
            case "41":
                return "行业分析";
            default:
                return "";
        }
    }

    /**
     * Dispatches a downloaded page to the matching link parser. Failed downloads
     * (transport error or non-200 status) are re-queued via
     * {@link #requestAgainCrawlerRecord}, with an extra back-off sleep on 503.
     *
     * @param crawlerRequestRecord the request that produced the page
     * @param httpPage             the downloaded page
     * @return follow-up requests (turn pages, item pages, comment pages, retries)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<CrawlerRequestRecord>();

        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() != 200) {
            log.error("url [{}] download error!status is [{}]", requestUrl, httpPage.getStatusCode());
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            if (httpPage.getStatusCode() == 503) {
                // Back off when the site throttles us.
                // NOTE(review): 5000000 ms ≈ 83 minutes — confirm this was not meant to be 5000.
                try {
                    Thread.sleep(5000000);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag instead of swallowing it.
                    Thread.currentThread().interrupt();
                    log.error(e.getMessage());
                }
            }
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        if (requestUrl.matches(MODULE_URL)) {
            moduleUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        if (requestUrl.matches(ITEM_ARTICLE_URL) || requestUrl.matches(ITEM_VIDEO_URL)) {
            itemUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        if (requestUrl.matches(COMMENT_URL)) {
            commentUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Parses a module list-page response: verifies the business code, schedules
     * the next list page, then turns every list entry into an item
     * (article/video) request. Failed or malformed responses are re-queued via
     * {@link #requestAgainCrawlerRecord}.
     */
    private void moduleUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        try{
            // The API wraps its payload with a business "code"; anything but "200" means the page is unusable.
            String code = httpPage.getJson().jsonPath($_type + ".code").get();
            if (!"200".equals(code)) {
                log.error("module url [{}] download error! will retry", crawlerRequestRecord.getHttpRequest().getUrl());
                requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
                crawlerRequestRecord.setNeedWashPage(false);
                return;
            }
        }catch (Exception e){
            // Body was not the expected JSON shape — retry as well.
            log.error("download error!will retry");
            requestAgainCrawlerRecord(parsedLinks,crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        // Turn page: rebuild the url with pNo incremented by one, all other params unchanged.
        String requestUrl = httpPage.getRequest().getUrl();
        String[] split = requestUrl.split("\\?");
        String nextUrl = split[0] + "?";
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair nameValuePair : parse) {
            String name = nameValuePair.getName();
            String value = nameValuePair.getValue();
            if ("pNo".equals(name)) {
                nextUrl = nextUrl + name + "=" + (Integer.parseInt(value) + 1) + "&";
            } else {
                nextUrl = nextUrl + name + "=" + value + "&";
            }
        }
        // Drop the trailing '&' left by the loop.
        nextUrl = nextUrl.substring(0, nextUrl.length() - 1);
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parsedLinks.add(turnRecord);

        // List-page parsing: each entry becomes an item request.
        List<String> all = httpPage.getJson().jsonPath($_type + ".data.list").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String articleId = jsonObject.getString("articleId");
            String articleType = jsonObject.getString("articleType");
            if (StringUtils.isBlank(articleId) || StringUtils.isBlank(articleType)) {
                log.error("articleId or articleType is null !");
                continue;
            }
            String itemUrl = "";
            if ("1".equals(articleType) || "2".equals(articleType)) {
                // Video entry.
                itemUrl = String.format(ITEM_VIDEO_SOURCE_URL, articleId);
            } else if ("0".equals(articleType)) {
                // Article entry.
                itemUrl = String.format(ITEM_ARTICLE_SOURCE_URL, articleId);
            }
            if (StringUtils.isBlank(itemUrl)) {
                log.error("itemUrl is null!json is {}",JSONObject.toJSONString(jsonObject));
                continue;
            }

            String releaseTime = jsonObject.getString("createTimeStamp");
            if (StringUtils.isBlank(releaseTime)) {
                log.error("releaseTime can not is null!");
                continue;
            }

            // createTimeStamp is in seconds; appending "000" converts to milliseconds.
            long releaseTimeToLong = Long.parseLong(releaseTime + "000");
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .copyResultTags()
                    .copyBizTags()
                    .build();
            String siteBiz = itemRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site_biz");
            if (StringUtils.isNotBlank(siteBiz)) {
                // Prefix the biz tag with the content kind so downstream can tell video from article.
                if ("1".equals(articleType) || "2".equals(articleType)) {
                    itemRecord.tagsCreator().bizTags().addSiteBiz("hao_video-" + siteBiz);
                } else if ("0".equals(articleType)) {
                    itemRecord.tagsCreator().bizTags().addSiteBiz("hao_article-" + siteBiz);
                }
            }else{
                // site_biz is copied from the parent request, so a blank value would be
                // blank for every entry — abort the whole list rather than continue.
                log.error("siteBiz can not is null!");
                return;
            }
            // isRecommend == "1" marks an editor-recommended ("elite") entry.
            String elite = "1".equals(jsonObject.getString("isRecommend")) ? "是" : "否";
            itemRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Is_Elite, elite);
            parsedLinks.add(itemRecord);
        }
    }

    /**
     * Handles an article/video detail page:
     * 1. always schedules an internal download of the comment-count API so that
     *    afterInternalDownload can stash the total under the "comments" extra;
     * 2. when the request's result tags ask for comment data, also schedules a
     *    washable comment-list request carrying the original filter config.
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        String title = httpPage.getHtml().xpath("//title").get();
        // "找不到页面" is the site's "page not found" placeholder title.
        if (StringUtils.isNotBlank(title) && "找不到页面".equals(title)) {
            log.error("[{}] page is not exist!", requestUrl);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        // Internal download: fetch the comment count (the comment API keys on "<item url>.html").
        String url = String.format(COMMENT_SOURCE_URL, requestUrl + ".html");
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .needWashed(false)
                .needParsed(false)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        parsedLinks.add(record);


        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (resultTags.hasDataType(comment)) {
            // Remove the label so the item page itself is not washed as a comment.
            resultTags.getCategoryTag().removeLabelTag("comment");
            CrawlerRequestRecord commentFilter = null;
            if ((commentFilter = resultTags.getCategoryTag().getKVTagObjVal(COMMENT_FILTER_INFO, CrawlerRequestRecord.class)) == null) {
                log.error("comment filter info can not is null!");
                return;
            }
            String commentUrl = String.format(COMMENT_SOURCE_URL, requestUrl + ".html");
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .needWashed(true)
                    .resultLabelTag(comment)
                    .build();
            // Carry over the filter config and remember which article these comments belong to.
            commentRecord.setFilter(commentFilter.getFilter());
            commentRecord.setFilterInfos(commentFilter.getFilterInfos());
            commentRecord.getHttpRequest().addExtra("articleUrl", requestUrl);
            // The elite flag only applies to the item itself, not its comments.
            if (StringUtils.isNotBlank(commentRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(Tag_Field_Is_Elite))) {
                commentRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(Tag_Field_Is_Elite);
            }
            parsedLinks.add(commentRecord);
        }
    }

    /**
     * Handles a comment-list API response: aborts on "topic not found", retries
     * on malformed JSON, and schedules the next comment page while
     * pageNo &lt; pageCount.
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String error = JSONObject.parseObject(httpPage.getRawText()).getString("error");
        if(StringUtils.isNotBlank(error) && "topic not found".equals(error)){
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        String count = "0";
        try {
            // NOTE(review): count is only read here as a shape probe (failure → retry);
            // its value is never used afterwards.
            count = httpPage.getJson().jsonPath($_type + ".total").get();
        } catch (Exception e) {
            log.error("[{}] comment url download error!", crawlerRequestRecord.getHttpRequest().getUrl());
            requestAgainCrawlerRecord(parsedLinks,crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        // Comment turn page: rebuild the url with pageNo incremented by one.
        String requestUrl = httpPage.getRequest().getUrl();
        int pageNo = 0;
        String[] split = requestUrl.split("\\?");
        String nextUrl = split[0] + "?";
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair nameValuePair : parse) {
            String name = nameValuePair.getName();
            String value = nameValuePair.getValue();
            if ("pageNo".equals(name)) {
                pageNo = Integer.parseInt(value);
                nextUrl = nextUrl + name + "=" + (pageNo + 1) + "&";
            } else {
                nextUrl = nextUrl + name + "=" + value + "&";
            }
        }
        // Drop the trailing '&' left by the loop.
        nextUrl = nextUrl.substring(0, nextUrl.length() - 1);
        int totalPage = Integer.parseInt(httpPage.getJson().jsonPath($_type + ".pageCount").get());
        if (pageNo < totalPage) {
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .needWashed(true)
                    .copyResultTags()
                    .build();
            // Keep the extras (e.g. "articleUrl") flowing to the next comment page.
            commentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
            parsedLinks.add(commentRecord);
        }
    }

    /**
     * After the comment-count API internal download finishes, stores the ".total"
     * field of its JSON response under the "comments" extra of the owning item
     * request; falls back to "0" when the payload cannot be read.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        for (CrawlerRequestRecord downloaded : internalDownloadRecords) {
            HttpPage page = downloaded.getInternalDownloadPage();
            String total;
            try {
                total = page.getJson().jsonPath($_type + ".total").get();
            } catch (Exception ignored) {
                // Unreadable payload — treat as zero comments.
                total = "0";
            }
            crawlerRecord.getHttpRequest().addExtra("comments", total);
        }
    }

    /**
     * Washes a downloaded page into result data, dispatching on the data types
     * requested by the record's result tags (article / interaction / comment).
     *
     * @return one CrawlerData per requested type; comments may contribute several
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> results = new ArrayList<>();
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();

        if (resultTags.hasDataType(article)) {
            results.add(washArticle(crawlerRecord, page));
        }
        if (resultTags.hasDataType(interaction)) {
            results.add(washInteraction(crawlerRecord, page));
        }
        if (resultTags.hasDataType(comment)) {
            results.addAll(washComment(crawlerRecord, page));
        }

        return results;
    }

    /**
     * Washes an article or video detail page into one article-type CrawlerData.
     * Article pages carry release time, body text, images and an optional
     * car-series card; video pages carry an intro, topic tags and an optional
     * series name.
     *
     * @return the built CrawlerData, or null when the url matches neither
     *         pattern or the article branch fails to parse its release time
     */
    private CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // Trailing path segment is the article/video id used in the dataId.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        String title = httpPage.getHtml().xpath("//h1/text()").get();
        String author = httpPage.getHtml().xpath("//div[@class=\"author\"]/a/text()").get();
        String authorId = httpPage.getHtml().xpath("//div[@class=\"author\"]/a/@href").get();
        // xpath get() may return null when the author link is missing; the old code NPE'd on matches().
        authorId = (authorId != null && authorId.matches("/writer/\\d+")) ? authorId.substring(authorId.lastIndexOf("/") + 1) : "0";
        String authorFollows = httpPage.getHtml().xpath("//span[text()='人气值']/preceding-sibling::span/text()").get();
        List<Map<String, String>> seriesList = new ArrayList<>();
        Map<String, String> series = new HashMap<>();
        CrawlerData crawlerData = null;
        if (itemUrl.matches(ITEM_ARTICLE_URL)) {
            String releaseTime = httpPage.getHtml().xpath("//span[@class=\"time\"]/text()").get();
            List<String> contents = httpPage.getHtml().xpath("//div[@class=\"art-content\"]/p//text()").all();
            StringBuffer contentBf = new StringBuffer();
            for (String data : contents) {
                contentBf.append(data).append(" ");
            }
            List<String> images = httpPage.getHtml().xpath("//div[@class=\"art-content\"]/p//img/@src").all();
            StringBuffer imageBf = new StringBuffer();
            for (String image : images) {
                if (!image.startsWith("http")) {
                    // Protocol-relative src.
                    image = "https:" + image;
                }
                imageBf.append(image).append("\\0x1");
            }
            String seriesName = httpPage.getHtml().xpath("//div[@class=\"hj-car-detail hj-article-car\"]//span[@class=\"hj-content-title\"]").get();
            // Guard the href: "https:" + null used to produce the bogus url "https:null",
            // which passed the isNotBlank check below.
            String seriesHref = httpPage.getHtml().xpath("//div[@class=\"hj-car-detail hj-article-car\"]//div[@class=\"car-detail-inner\"]/a/@href").get();
            String seriesUrl = seriesHref == null ? "" : "https:" + seriesHref;
            series.put("series_url", seriesUrl);
            series.put("series_name", seriesName);
            seriesList.add(series);
            try {
                crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                        .url(itemUrl)
                        .releaseTime(washTime(releaseTime))
                        // defaultString: title may be null and getContentNoEs is not null-safe.
                        .addContentKV(Field_Title, getContentNoEs(StringUtils.defaultString(title)))
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Author_Id, authorId)
                        .addContentKV(Field_Content, getContentNoEs(contentBf.toString()))
                        .addContentKV(Field_Images, imageBf.toString())
                        .addContentKV(Field_Author_Follows, authorFollows)
                        .build();
                if (StringUtils.isNotBlank(seriesName) && StringUtils.isNotBlank(seriesUrl)) {
                    crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Series, seriesList);
                }
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        } else if (itemUrl.matches(ITEM_VIDEO_URL)) {
            List<String> contents = httpPage.getHtml().xpath("//span[text()='简介 :']/following-sibling::span//text()").all();
            StringBuffer contentBf = new StringBuffer();
            for (String content : contents) {
                contentBf.append(content).append(" ");
            }
            List<String> topicType = httpPage.getHtml().xpath("//span[text()='标签 :']/following-sibling::span//text()").all();
            String seriesName = httpPage.getHtml().xpath("//div[@class=\"hj-car-detail hj-video-col-item\"]/div[@class=\"hj-common-title\"]/span/text()").get();
            series.put("series_name", seriesName);
            seriesList.add(series);
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .url(itemUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_Title, getContentNoEs(StringUtils.defaultString(title)))
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Content, getContentNoEs(contentBf.toString()))
                    .addContentKV(Field_Author_Follows, authorFollows)
                    .build();

            if (StringUtils.isNotBlank(seriesName)) {
                crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Series, seriesList);
            }
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, topicType);
        }
        return crawlerData;
    }

    /**
     * Washes interaction metrics (views / likes / comments) for an item page into
     * an interaction-type CrawlerData linked to its article via parentId. The
     * comment count was stashed in the request extras by afterInternalDownload.
     *
     * @return the built CrawlerData, or null when the url matches neither
     *         pattern or the article branch fails to parse its release time
     */
    private CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        String comments = (String) crawlerRequestRecord.getHttpRequest().getExtras().get("comments");
        CrawlerData crawlerData = null;
        if (itemUrl.matches(ITEM_ARTICLE_URL)) {
            String views = httpPage.getHtml().xpath("//span[@class=\"view\"]/text()").get();
            // xpath get() may return null when the view counter is absent; the old code NPE'd on matches().
            views = (views != null && views.matches("浏览：\\S+")) ? views.split("：")[1] : "0";
            String releaseTime = httpPage.getHtml().xpath("//span[@class=\"time\"]/text()").get();
            try {
                crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), articleKey))
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                        .url(itemUrl)
                        .releaseTime(washTime(releaseTime))
                        .addContentKV(Field_I_Comments, washNum(comments))
                        .addContentKV(Field_I_Views, washNum(views))
                        .build();
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        } else if (itemUrl.matches(ITEM_VIDEO_URL)) {
            String likes = httpPage.getHtml().xpath("//i[@class=\"v-like\"]/parent::div/text()").get();
            likes = StringUtils.isNotBlank(likes) ? likes.trim() : "0";
            String views = httpPage.getHtml().xpath("//i[@class=\"v-view\"]/parent::div/text()").get();
            views = StringUtils.isNotBlank(views) ? views.trim() : "0";
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                    .url(itemUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_I_Likes, washNum(likes))
                    .addContentKV(Field_I_Views, washNum(views))
                    .addContentKV(Field_I_Comments, washNum(comments))
                    .build();
        }
        return crawlerData;
    }

    /**
     * Washes one page of the comment-list API into comment-type CrawlerData
     * entries, each linked to its article via parentId. The owning article url is
     * read from the request extras ("articleUrl") set when the comment request
     * was created in itemUrlRecord.
     *
     * @return washed comment data; entries whose createTime fails to parse are skipped
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("/") + 1);
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        List<String> allReplies = httpPage.getJson().jsonPath($_type + ".data").all();
        for (String data : allReplies) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String commentId = jsonObject.getString("id");
            String author = jsonObject.getString("nickName");
            String authorId = jsonObject.getString("userId");
            String releaseTime = jsonObject.getString("createTime");
            String content = jsonObject.getString("content");
            String floor = jsonObject.getString("floor");
            JSONArray imageList = jsonObject.getJSONArray("picUrlList");
            StringBuffer imageBf = new StringBuffer();
            // picUrlList may be absent for text-only comments; the old code NPE'd on the for-loop.
            if (imageList != null) {
                for (Object image : imageList) {
                    String url = (String) image;
                    if (StringUtils.isBlank(url)) {
                        continue;
                    }
                    imageBf.append(url).append("\\x01");
                }
            }
            String likes = jsonObject.getString("support");

            // replyRef is present when this comment replies to another floor.
            JSONObject replyRef = jsonObject.getJSONObject("replyRef");
            CrawlerData crawlerData = null;
            try {
                CrawlerDataBuilder comment = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.comment.enumVal(), commentId))
                        .resultLabelTag(valueOf("comment"))
                        .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                        .url(itemUrl)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Author_Id, authorId)
                        .addContentKV(Field_Content, getContentNoEs(content))
                        .addContentKV(Field_Images, imageBf.toString())
                        .addContentKV(Field_I_Likes, washNum(likes))
                        .addContentKV(Field_Floor, floor);

                if (replyRef != null) {
                    String refFloor = replyRef.getString("floor");
                    comment.addContentKV(Field_Reply_Floor, refFloor);
                }
                crawlerData = comment.build();
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
            // Skip entries that failed to build instead of appending null to the result list.
            if (crawlerData != null) {
                crawlerDataList.add(crawlerData);
            }
        }
        return crawlerDataList;
    }

    /**
     * Post-execution hook required by the base class — intentionally a no-op,
     * this script has nothing to clean up after a record finishes.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-queues a failed request, up to 5 attempts tracked via the
     * REQUEST_AGAIN_TAG biz tag. Requests carrying the
     * "turn_page_item_request" label are rebuilt as item-page requests, all
     * others as turn-page requests. The retry inherits extras, headers and the
     * wash/parse flags of the original request.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            // Give up after 5 retries.
            if (count >= 5) {
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord = null;
        if (type == null) {
            // recordKey is suffixed with the retry count — presumably to dodge
            // duplicate-record filtering on re-submission; confirm with framework docs.
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        if (crawlerRequestRecord == null) {
            return;
        }

        // Carry over everything the retry needs to behave exactly like the original request.
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Converts the site's relative or absolute Chinese time strings to epoch
     * milliseconds: "刚刚" (just now), "N秒前"/"N分钟前"/"N小时前" (N
     * seconds/minutes/hours ago) relative to now, or an absolute "yyyy-MM-dd"
     * date. Blank or unrecognized input yields 0.
     *
     * @param time raw time string from the page, may be blank
     * @return epoch millis, or 0 when the format is unknown
     * @throws ParseException when an absolute date fails to parse
     */
    private static long washTime(String time) throws ParseException {
        if (StringUtils.isBlank(time)) {
            return 0L;
        }

        long now = System.currentTimeMillis();
        if ("刚刚".equals(time)) {
            return now;
        }
        if (time.matches("\\d+秒前")) {
            return now - Integer.parseInt(time.split("秒")[0]) * MILLIS_PER_SECOND;
        }
        if (time.matches("\\d+分钟前")) {
            return now - Integer.parseInt(time.split("分钟")[0]) * MILLIS_PER_MINUTE;
        }
        if (time.matches("\\d+小时前")) {
            return now - Integer.parseInt(time.split("小时")[0]) * MILLIS_PER_HOUR;
        }
        if (time.matches("\\d{4}-\\d{2}-\\d{2}")) {
            return DateUtils.parseDate(time, "yyyy-MM-dd").getTime();
        }
        return 0L;
    }

    /**
     * Normalizes a count string: converts "X万" (X × 10,000) into a plain integer
     * string; blank/null input yields "0"; any other value is returned unchanged.
     * <p>
     * Uses BigDecimal instead of double math so large values do not collapse into
     * scientific notation (e.g. "1000万" previously became "1" because
     * String.valueOf(1.0E7).split("\\.") yields "1").
     *
     * @param num raw count string, may be null/blank
     * @return normalized integer string
     */
    private static String washNum(String num) {
        if (num == null || num.trim().isEmpty()) {
            return "0";
        }
        if (num.contains("万")) {
            return new BigDecimal(num.split("万")[0].trim())
                    .multiply(BigDecimal.valueOf(10000))
                    .toBigInteger()
                    .toString();
        }
        return num;
    }

    /**
     * Repeatedly HTML-unescapes entity references (handles double-escaped text
     * such as "&amp;amp;lt;") until no '&'/';' pair remains, capped at 6 passes
     * so text containing a literal '&' and ';' cannot loop forever.
     *
     * @param str raw string, may be null (callers pass nullable xpath results)
     * @return unescaped string, or null when the input is null
     */
    public static String getContentNoEs(String str){
        if (str == null) {
            // Previously NPE'd on contains(); washArticle can pass a null title.
            return null;
        }
        int times = 0;
        while (str.contains("&") && str.contains(";")){
            str = StringEscapeUtils.unescapeHtml(str);
            times ++;
            if (times > 5){
                break;
            }
        }
        return str;
    }

    /**
     * Extracts the query parameters of a url into a name → value map.
     * Assumes the url contains a '?' with a query string — TODO confirm callers
     * always pass one (currently unused within this file).
     *
     * @param requestUrl url with a query string
     * @return parameter name → decoded value
     */
    private Map<String, String> getMapFormUrl(String requestUrl) {
        Map<String, String> params = new HashMap<>();
        String query = requestUrl.split("\\?")[1];
        for (NameValuePair pair : URLEncodedUtils.parse(query, Charset.defaultCharset())) {
            params.put(pair.getName(), pair.getValue());
        }
        return params;
    }


    /** Ad-hoc manual check for washNum's "万" conversion. */
    public static void main(String[] args) {
        String normalized = washNum("7.09万");
        System.out.println(normalized);
    }

}
