package com.chance.cc.crawler.development.scripts.bitauto.yichehao;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.DigestUtils;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Field_Author_Follows;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Series;

/**
 * @author lt
 * @version 1.0
 * @date 2020-12-31 14:34:56
 * @email okprog@sina.com
 */
public class BitAutoHaoArticleCrawlerScript extends CrawlerCommonScript {

    // SLF4J logger for this script.
    private static Logger logger = LoggerFactory.getLogger(BitAutoHaoArticleCrawlerScript.class);
    // Prefix used to absolutize relative links scraped from news.yiche.com pages.
    private static String bitautoPrefix = "https://news.yiche.com";

    // Shared de-duplication key pool for a single crawl round; reset whenever the
    // index page is (re)visited — see parseIndexLinks.
    private static ConcurrentHashMap<String, Object> localKeysMap = new ConcurrentHashMap<>();

    // URL patterns that route a downloaded page to the matching parser in parseLinks.
    private static final String indexRegex = "https?://hao\\.yiche\\.com/";
    // Index URL with a "#file" fragment: seed article links from a local file.
    private static final String indexFileRegex = "https?://hao\\.yiche\\.com/#file";
    // Index URL with a "#comment" fragment: comment-tracing entry point (sleep-only branch).
    private static final String indexComRegex = "https?://hao\\.yiche\\.com/#comment";
    private static final String listUrlRegex = "https?://hao\\.yiche\\.com/article/articlelist\\S*";
    private static final String articleUrlRegex = "https?://news\\.yiche\\.com/hao/wenzhang/\\S*";
    private static final String commentUrlRegex = "https?://newsapi\\.yiche\\.com/comment/comment/getdata\\S*";
    private static final String likesUrlRegex = "https://news\\.yiche\\.com/web_api/information_api/api/v1/support/support_info\\S*";
    private static final String viewsUrlRegex = "https://newsapi\\.yiche\\.com/promotion-api/traffic/news/pv-total\\?ids=\\d*";
    private static final String followsUrlRegex = "https://news\\.yiche\\.com/web_api/user_center_api/api/v1/user/get_author_info_by_id\\S*";
    // NOTE(review): dots in this pattern are unescaped, so it matches slightly more
    // than intended (harmless here but worth confirming).
    private static final String allArticleListRegex = "http[s]*://news.yiche.com/info/categoryId0_p0_l0_f0_g0_c0_b0_\\d+.html";

    // Request URL templates. The likes/follows formats embed a URL-encoded JSON
    // "param" whose "#contentId"/"#contentType"/"#uid" placeholders are filled
    // via String.replace before the request is issued.
    private static final String listUrlFormat = "https://hao.yiche.com/article/articlelist?pageindex=%s&pagesize=50&_=%s";
    private static final String listUrlFormat2 = "https://hao.yiche.com/article/articlelist?pageindex=%s&pagesize=50";
    private static final String commentUrlFormat = "https://newsapi.yiche.com/comment/comment/getdata?productId=%s&objectId=%s&pageIndex=%s&pageSize=%s&isHot=false&_=%s";
    private static final String likesUrlFormat = "https://news.yiche.com/web_api/information_api/api/v1/support/support_info?cid=508&param=%7B%22contentId%22%3A%22#contentId%22%2C%22contentType%22%3A%22#contentType%22%7D";
    private static final String viewsUrlFormat = "https://newsapi.yiche.com/promotion-api/traffic/news/pv-total?ids=%s";
    private static final String followsUrlFormat = "https://news.yiche.com/web_api/user_center_api/api/v1/user/get_author_info_by_id?cid=508&param=%7B%22userId%22%3A%22#uid%22%7D";

    // Value of the "site" business tag that routes records to this script (crawlerCheck).
    private static final String scriptSite = "hao_article";

    // Durations in milliseconds; not referenced in this chunk — presumably used
    // further down the file (TODO confirm).
    private static final long ONE_SECOND = 1000L;
    private static final long ONE_MINUTE = 60000L;
    private static final long ONE_HOUR = 3600000L;
    private static final long ONE_DAY = 86400000L;

    /**
     * Domain key under which this script is registered with the framework.
     *
     * @return the fixed domain identifier "bitauto"
     */
    @Override
    public String domain() {
        final String domainKey = "bitauto";
        return domainKey;
    }

    /**
     * Registers every URL pattern this script is willing to accept; pages whose
     * URL matches one of these are routed to {@link #parseLinks}.
     */
    @Override
    public void initUrlRegulars() {
        final String[] acceptedPatterns = {
                indexRegex,
                indexFileRegex,
                indexComRegex,
                listUrlRegex,
                articleUrlRegex,
                commentUrlRegex,
                allArticleListRegex,
        };
        for (String pattern : acceptedPatterns) {
            addUrlRegular(pattern);
        }
    }

    /**
     * Decides whether this script should handle the given request by comparing
     * the "site" value from the record's business category tag with this
     * script's site key.
     *
     * @param crawlerRequestRecord incoming request record to check
     * @return {@code true} when the record carries a "site" tag equal to {@code scriptSite}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        if (categoryTag == null) {
            // No business category tag at all — cannot belong to this script.
            return false;
        }
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // FIX: compare constant-first so a missing "site" tag yields false
        // instead of throwing a NullPointerException.
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /**
     * Pre-flight hook: when upstream handed us a serialized domain-result
     * payload (Tag_Field_Domain_Result_Json), unpack it, retarget the HTTP
     * request at the article URL it carries, and tag the record for
     * article + interaction results. The payload tag is removed afterwards so
     * it is not processed twice.
     *
     * @param requestRecord        record about to be downloaded
     * @param supportSourceRecords auxiliary source records passed through to super
     * @return whatever the superclass produces for this record
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        if (requestRecord.tagsCreator().bizTags().hasKVTag(Tag_Field_Domain_Result_Json)) {
            CategoryTag categoryTag = requestRecord.tagsCreator().bizTags().getCategoryTag();
            KVTag resultJsonTag = categoryTag.getKVTag(Tag_Field_Domain_Result_Json);
            CrawlerDomainUrls domainUrls = JSON.parseObject(String.valueOf(resultJsonTag.getVal()), CrawlerDomainUrls.class);
            // The "url" field itself holds a JSON payload with the real target URL
            // and the release timestamp.
            JSONObject payload = JSONObject.parseObject(domainUrls.getUrl());
            requestRecord.setReleaseTime(payload.getLong("releaseTime"));
            requestRecord.setNeedParsedPage(true);
            requestRecord.tagsCreator().resultTags().addResultDataType(article);
            requestRecord.tagsCreator().resultTags().addResultDataType(interaction);
            requestRecord.getHttpRequest().setUrl(payload.getString(Field_Urls));
            // Drop the payload tag so the record is not re-expanded on a retry.
            categoryTag.getKvTags().remove(Tag_Field_Domain_Result_Json);
        }
        return super.prepareRequest(requestRecord, supportSourceRecords);
    }

    /**
     * Entry point for link extraction. Dispatches the downloaded page to the
     * matching parser by URL pattern (index, file seed, comment-trace, list,
     * article, comment API, site-wide article list). On download failure
     * (non-200 / empty body) the record is re-queued for retry unless the
     * server returned 404.
     *
     * @param crawlerRequestRecord the request that produced {@code httpPage}
     * @param httpPage             downloaded page (HTML or JSON)
     * @return follow-up requests discovered on the page (possibly empty)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        int statusCode = httpPage.getStatusCode();
        if (StringUtils.isBlank(httpPage.getRawText()) || !httpPage.isDownloadSuccess() || statusCode != 200) {
            crawlerRequestRecord.setNeedWashPage(false);
            if (statusCode == 404) {
                // Permanently gone — nothing to retry.
                logger.info("status code is {},pass", statusCode);
                return parsedLinks;
            }
            logger.error("页面下载状态：{}，状态码：{}，内容为空：{}，实行回推", httpPage.isDownloadSuccess(), statusCode, StringUtils.isBlank(httpPage.getRawText()));
            // Strip the filter tag and push the record back so it is re-downloaded.
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            parsedLinks.add(crawlerRequestRecord);
            return parsedLinks;
        }
        if (lastRequestUrl.matches(indexComRegex)) {
            logger.info("开始采集易车号文章追溯评论");
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                // FIX: restore the interrupt flag instead of swallowing it, so the
                // worker thread can still observe cancellation.
                Thread.currentThread().interrupt();
                logger.error(e.getMessage());
            }
            return parsedLinks;
        }
        if (lastRequestUrl.matches(indexFileRegex)) {
            logger.info("开始采集易车号文章追溯评论,从文件初始文章链接");
            // Seed article URLs from the local file referenced by the "filePath" tag,
            // then remove the tag so it is consumed only once.
            String filePath = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("filePath");
            List<String> urls = getContent(filePath);
            crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove("filePath");
            for (String url : urls) {
                CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(url)
                        .recordKey(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .needParsed(true)
                        .needWashed(true)
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .build();
                parsedLinks.add(requestRecord);
            }
            return parsedLinks;
        }
        if (lastRequestUrl.matches(indexRegex)) {
            return parseIndexLinks(crawlerRequestRecord, parsedLinks);
        }
        if (lastRequestUrl.matches(listUrlRegex)) {
            return parseListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(articleUrlRegex)) {
            return parseItemLinks(crawlerRequestRecord, httpPage, parsedLinks, lastRequest, lastRequestUrl);
        }
        if (lastRequestUrl.matches(commentUrlRegex)) {
            return parseCommentLinks(crawlerRequestRecord, httpPage, parsedLinks, lastRequest);
        }
        if (lastRequestUrl.matches(allArticleListRegex)) {
            // This branch appends into parsedLinks in place rather than returning.
            parseAllArticleList(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Parses one page of the site-wide article list (news.yiche.com/info/...):
     * queues the "next" pagination link, then schedules every article card that
     * carries both a link and a publish time.
     *
     * @param crawlerRequestRecord request that fetched the list page
     * @param httpPage             list page HTML
     * @param parsedLinks          output collection the new requests are appended to
     */
    private void parseAllArticleList(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Pagination: follow the "next" anchor if present.
        String nextUrl = httpPage.getHtml().xpath("//a[@data-current=\"next\"]/@href").get();
        if (StringUtils.isNotBlank(nextUrl)) {
            nextUrl = bitautoPrefix + nextUrl;
            CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(listRecord);
        }

        // Article cards: each needs both a (relative) link and a publish time.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"article-card\"]").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath(".//a[@class=\"figure\"]/@href").get();
            String time = node.xpath(".//span[@class=\"time\"]").get();
            if (StringUtils.isBlank(itemUrl) || StringUtils.isBlank(time)) {
                continue;
            }
            itemUrl = bitautoPrefix + itemUrl;
            try {
                long releaseTime = cleanTime(time);
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTime)
                        .copyBizTags()
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build();
                parsedLinks.add(itemRecord);
            } catch (Exception e) {
                // FIX: keep the stack trace and the offending URL instead of logging
                // only e.getMessage().
                logger.error("failed to schedule article item: " + itemUrl, e);
            }
        }
    }

    /**
     * Handles the hao.yiche.com index page: resets the per-run de-duplication
     * key pool and queues page 1 of the article-list API with the index page
     * as Referer.
     *
     * @param crawlerRequestRecord index request (parent for the list request)
     * @param parsedLinks          output collection
     * @return {@code parsedLinks} with the first list request appended
     */
    private List<CrawlerRequestRecord> parseIndexLinks(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> parsedLinks) {
        logger.info("script crawler start , init local keys map");
        if (localKeysMap.size() > 0) {
            logger.info("last keys map size : {}, now clean up", localKeysMap.size());
            // FIX: clear the shared map in place rather than reassigning a raw-typed
            // new ConcurrentHashMap — avoids the raw type and keeps any concurrent
            // reader looking at the same (now empty) map.
            localKeysMap.clear();
        }
        String listUrl = String.format(listUrlFormat2, 1);
        CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(listUrl)
                .recordKey(listUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .notFilterRecord()
                .build();
        HttpRequest httpRequest = listRecord.getHttpRequest();
        httpRequest.addHeader("Host", "hao.yiche.com");
        httpRequest.addHeader("Referer", crawlerRequestRecord.getHttpRequest().getUrl());
        httpRequest.addHeader("User-Agent", getRandomUA());
        parsedLinks.add(listRecord);
        return parsedLinks;
    }

    /**
     * Parses one page of the article-list API: always schedules the next page,
     * then turns each list item into an article request, de-duplicating against
     * the shared {@code localKeysMap} key pool.
     *
     * @param crawlerRequestRecord list request being processed
     * @param httpPage             downloaded list HTML
     * @param parsedLinks          output collection
     * @return {@code parsedLinks} with next-page and item requests appended
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        Map<String, Object> urlParams = getUrlParams(lastRequestUrl);
        int pageindex = Integer.parseInt((String) urlParams.get("pageindex"));
        String nextPageUrl = String.format(listUrlFormat2, (pageindex + 1));
        CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextPageUrl)
                .recordKey(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .build();
        HttpRequest nextRequest = nextPageRecord.getHttpRequest();
        nextRequest.addHeader("Host", "hao.yiche.com");
        nextRequest.addHeader("Referer", lastRequest.getHeaders().get("Referer"));
        nextRequest.addHeader("User-Agent", getRandomUA());
        parsedLinks.add(nextPageRecord);

        // Parse the item list itself.
        List<Selectable> itemNodes = httpPage.getHtml().xpath("//div[@class=\"comment clearfix\"]").nodes();
        if (itemNodes == null || itemNodes.size() == 0) {
            logger.error("download page has no items, RawText :{}", httpPage.getRawText());
            return parsedLinks;
        }
        logger.info("this page has {} items", itemNodes.size());
        for (Selectable itemNode : itemNodes) {
            String itemUrl = itemNode.xpath("./div[@class=\"content_box\"]/div/h4/a/@href").get();
            // FIX: guard against cards without a link — the original threw an NPE
            // on itemUrl.startsWith when the xpath matched nothing.
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }
            if (itemUrl.startsWith("//")) {
                // Protocol-relative link; absolutize it.
                itemUrl = "https:" + itemUrl;
            }
            long releaseTime = 0;
            try {
                String pubTime = itemNode.xpath("./div[@class=\"building_box\"]/p/text()").get();
                if (StringUtils.isBlank(pubTime)) {
                    logger.error("parse date get null");
                    continue;
                } else {
                    releaseTime = cleanTime(pubTime);
                }
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(releaseTime)
                        .copyBizTags()
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build();
                HttpRequest itemRequest = itemRecord.getHttpRequest();
                itemRequest.addHeader("Host", "news.yiche.com");
                itemRequest.addHeader("Referer", lastRequest.getHeaders().get("Referer"));
                itemRequest.addHeader("User-Agent", getRandomUA());
                // FIX: de-dup atomically with putIfAbsent instead of get-then-put.
                // First sighting registers the key; repeats lose the filter tag and
                // skip the pipeline.
                if (localKeysMap.putIfAbsent(itemRecord.getRecordKey(), itemRecord) != null) {
                    itemRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                    itemRecord.setSkipPipeline(true);
                }
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                // FIX: keep the stack trace and the URL context instead of logging
                // only the message.
                logger.error("publish time parse failed for " + itemUrl, e);
            }
        }
        return parsedLinks;
    }

    /**
     * Handles a downloaded article page: stores page extras (article URL,
     * topic id), schedules signed internal-download requests for the likes /
     * views / author-follows APIs, and kicks off comment paging when the page
     * advertises a positive comment count.
     *
     * @param crawlerRequestRecord article request being processed
     * @param httpPage             article page HTML
     * @param parsedLinks          output collection for follow-up requests
     * @param lastRequest          the HTTP request that fetched this page
     * @param lastRequestUrl       its URL (also used as Referer for sub-requests)
     * @return {@code parsedLinks} with the scheduled sub-requests appended
     */
    private List<CrawlerRequestRecord> parseItemLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, HttpRequest lastRequest, String lastRequestUrl) {
        crawlerRequestRecord.setNeedWashPage(true);
        Map<String, Object> extras = new HashMap<>();
        crawlerRequestRecord.getHttpRequest().setExtras(extras);
        extras.put("articleUrl", lastRequestUrl);
        // Topic id: first derived from the last URL path segment, then overridden
        // by the page-embedded newsId when the inline script provides one.
        String topicId = "";
        if (lastRequestUrl.endsWith("/")) {
            String[] split = lastRequestUrl.split("/");
            topicId = split[split.length - 2];
        } else {
            topicId = lastRequestUrl.substring(lastRequestUrl.lastIndexOf("/") + 1);
        }
        Matcher viewMt = Pattern.compile("newsId:\\s*'\\d*',").matcher(httpPage.getRawText());
        while (viewMt.find()) {
            topicId = viewMt.group(0).split("'")[1];
        }
        // Extract contentType from the inline page script.
        Matcher matcher = Pattern.compile("contentType:\\s*'\\d*'").matcher(httpPage.getRawText());
        String contentType = "";
        while (matcher.find()) {
            contentType = matcher.group(0).split("'")[1];
        }
        // Content types 21/23 map to comment product id "8", everything else to "1".
        String productId = "21".equals(contentType) || "23".equals(contentType) ? "8" : "1";
        extras.put("topicId", topicId);

        boolean hasDataType = crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article);
        if (hasDataType) {
            // Likes: signed internal-download request against the support-info API.
            String likesUrl = likesUrlFormat.replace("#contentId", topicId).replace("#contentType", contentType);
            CrawlerRequestRecord likesRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(likesUrl)
                    .httpUrl(likesUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            String timestamp = String.valueOf(System.currentTimeMillis());
            String sign = generatorSignFromUrl(likesUrl, timestamp);
            HttpRequest likesRequest = likesRecord.getHttpRequest();
            likesRequest.addHeader("Host", "news.yiche.com");
            likesRequest.addHeader("Referer", lastRequestUrl);
            likesRequest.addHeader("User-Agent", getRandomUA());
            likesRequest.addHeader("x-platform", "pc");
            likesRequest.addHeader("x-sign", sign);
            likesRequest.addHeader("x-timestamp", timestamp);
            likesRequest.addHeader("x-user-guid", UUID.randomUUID().toString());
            parsedLinks.add(likesRecord);

            // Views: pv-total API keyed by topic id (no sign required).
            String viewsUrl = String.format(viewsUrlFormat, topicId);
            CrawlerRequestRecord viewsRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(viewsUrl)
                    .httpUrl(viewsUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            HttpRequest viewsRequest = viewsRecord.getHttpRequest();
            viewsRequest.addHeader("Host", "newsapi.yiche.com");
            viewsRequest.addHeader("Referer", "https://news.yiche.com/");
            viewsRequest.addHeader("User-Agent", getRandomUA());
            parsedLinks.add(viewsRecord);

            // Follows (fan count): author id is taken from the profile link; any
            // failure falls back to "0" in extras instead of aborting the page.
            try {
                String authorUrl = httpPage.getHtml().xpath("//a[@class=\"news-detail-profile-active\"]/@href").get();
                String authorId = authorUrl.split("/u")[1];
                String followsUrl = followsUrlFormat.replace("#uid", authorId);
                CrawlerRequestRecord followsRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(followsUrl)
                        .httpUrl(followsUrl)
                        .releaseTime(crawlerRequestRecord.getReleaseTime())
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                        .build();
                String timestampTwo = String.valueOf(System.currentTimeMillis());
                String sign2 = generatorSignFromUrl(followsUrl, timestampTwo);
                HttpRequest followsRequest = followsRecord.getHttpRequest();
                followsRequest.addHeader("Host", "news.yiche.com");
                followsRequest.addHeader("Referer", lastRequestUrl);
                followsRequest.addHeader("User-Agent", getRandomUA());
                followsRequest.addHeader("x-platform", "pc");
                followsRequest.addHeader("x-sign", sign2);
                followsRequest.addHeader("x-timestamp", timestampTwo);
                followsRequest.addHeader("x-user-guid", UUID.randomUUID().toString());
                parsedLinks.add(followsRecord);
            } catch (Exception e) {
                extras.put("follows", "0");
            }
        }
        // Comment paging: the element text holds the comment count, or a
        // placeholder word when the page shows no number.
        String comments = httpPage.getHtml().xpath("//li[@class=\"news-detail-position-pinglun\"]/a/text()").get().trim();
        try {
            // Placeholder label ("评论") means no numeric count — treat as zero.
            if (comments.contains("评论")){
                comments="0";
            }
            // Validation parse only: a NumberFormatException here resets the
            // count to "0" in the catch below.
            int cmt = Integer.parseInt(comments);
        } catch (Exception e) {
            logger.error("error : {} , comments: {}", e.getMessage(), comments);
            comments = "0";
        }
        if (Integer.parseInt(comments) > 0) {
            // Schedule page 1 of the comment API; extras carry paging state.
            String commentUrl = String.format(commentUrlFormat, productId, topicId, 1, 50, System.currentTimeMillis());
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(commentUrl)
                    .httpUrl(commentUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .resultLabelTag(comment)
                    .resultLabelTag(interaction)
                    .needWashed(true)
                    .copyBizTags()
                    .notFilterRecord()
                    .build();
            Map<String, Object> commentExtras = copyExtras(extras);
            commentExtras.put("currentNum", 1);
            commentExtras.put("productId", productId);
            commentRecord.getHttpRequest().setExtras(commentExtras);

            parsedLinks.add(commentRecord);
        } else {
            logger.info("page [{}] no comment : [{}]", lastRequestUrl, comments);
        }

        return parsedLinks;
    }

    /**
     * Parses one comment-API response page. While the envelope is OK
     * (code 0 / msg "OK" / total == size) and the current page still contains
     * comments, schedules the next page with the paging state copied into its
     * extras.
     *
     * @param crawlerRequestRecord comment request being processed
     * @param httpPage             JSON response body
     * @param parsedLinks          output collection
     * @param lastRequest          request carrying paging extras (currentNum, topicId, productId)
     * @return {@code parsedLinks}, possibly with the next comment page appended
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks, HttpRequest lastRequest) {
        JSONObject envelope = JSONObject.parseObject(httpPage.getRawText());
        int code = envelope.getIntValue("code");
        String msg = envelope.getString("msg");
        JSONObject result = envelope.getJSONObject("result");
        int size = result.getIntValue("size");
        int total = result.getIntValue("total");
        int listSize = result.getJSONArray("list").size();
        // Stop paging on a bad envelope or an empty page.
        if (code != 0 || !"OK".equalsIgnoreCase(msg) || total != size || listSize <= 0) {
            return parsedLinks;
        }
        crawlerRequestRecord.setNeedWashPage(true);
        Map<String, Object> pagingExtras = lastRequest.getExtras();
        int nextNum = (int) pagingExtras.get("currentNum") + 1;
        String topicId = (String) pagingExtras.get("topicId");
        String productId = (String) pagingExtras.get("productId");
        String nextCommentUrl = String.format(commentUrlFormat, productId, topicId, nextNum, 50, System.currentTimeMillis());
        CrawlerRequestRecord nextRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .recordKey(nextCommentUrl)
                .httpUrl(nextCommentUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .resultLabelTag(comment)
                .resultLabelTag(interaction)
                .needWashed(true)
                .copyBizTags()
                .notFilterRecord()
                .build();
        Map<String, Object> nextExtras = copyExtras(pagingExtras);
        nextExtras.put("currentNum", nextNum);
        nextRecord.getHttpRequest().setExtras(nextExtras);
        parsedLinks.add(nextRecord);
        return parsedLinks;
    }

    /**
     * Builds the "x-sign" header value for the yiche web APIs: MD5 of the
     * request's cid/param query values concatenated with a fixed salt and the
     * caller-supplied timestamp.
     *
     * @param url       request URL whose query string carries "cid" and "param"
     * @param timestamp millisecond timestamp also sent as "x-timestamp"
     * @return hex MD5 sign, or a random hex string when the URL cannot be parsed
     */
    private String generatorSignFromUrl(String url, String timestamp) {
        Map<String, Object> urlParams = getUrlParams(url);
        try {
            if (null != urlParams) {
                String salt = "19DDD1FBDFF065D3A4DA777D2D7A81EC";
                String cid = (String) urlParams.get("cid");
                String param = URLDecoder.decode((String) urlParams.get("param"), "utf-8");
                String raw = "cid=" + cid + "&param=" + param + salt + timestamp;
                // FIX: hash over an explicit charset so the sign does not depend on
                // the platform default encoding (param may contain non-ASCII text).
                return DigestUtils.md5DigestAsHex(raw.getBytes(StandardCharsets.UTF_8));
            }
        } catch (UnsupportedEncodingException e) {
            // FIX: the original passed e.getMessage() as the log pattern and lost
            // the stack trace; log the exception properly with context.
            logger.error("url decode error, url: " + url, e);
        }
        // Fallback: random token so callers always get a non-null header value.
        return UUID.randomUUID().toString().replaceAll("-", "");
    }

    /**
     * After the internal downloads scheduled by {@code parseItemLinks} finish,
     * copies the likes / views / follows counters from their JSON responses
     * into the originating article request's extras map; parse failures fall
     * back to "0".
     *
     * @param crawlerRecord           the originating article request
     * @param internalDownloadRecords completed internal-download requests
     * @param links                   additional links collection (unused in this method)
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        HttpRequest httpRequest = crawlerRecord.getHttpRequest();
        String requestUrl = httpRequest.getUrl();
        Map<String, Object> extras = httpRequest.getExtras();
        if (requestUrl.matches(articleUrlRegex)) {
            for (CrawlerRequestRecord downloadRecord : internalDownloadRecords) {
                HttpRequest recordHttpRequest = downloadRecord.getHttpRequest();
                String recordHttpRequestUrl = recordHttpRequest.getUrl();
                // Likes: support-info API -> extras["likes"].
                if (recordHttpRequestUrl.matches(likesUrlRegex)) {
                    try {
                        JSONObject pageObject = JSONObject.parseObject(downloadRecord.getInternalDownloadPage().getRawText());
                        if (pageObject.getIntValue("status") == 1 && "success".equalsIgnoreCase(pageObject.getString("message"))) {
                            JSONObject dataObj = pageObject.getJSONObject("data");
                            String likes = dataObj.getString("supportCount");
                            extras.put("likes", likes);
                        } else {
                            extras.put("likes", "0");
                        }
                    } catch (Exception e) {
                        extras.put("likes", "0");
                    }
                }
                // Views: pv-total API -> extras["views"].
                // NOTE(review): unlike the likes/follows branches, a well-formed
                // response with a non-ok status leaves "views" unset here (only the
                // catch and the empty-array case write "0") — confirm downstream
                // tolerates a missing "views" key.
                if (recordHttpRequestUrl.matches(viewsUrlRegex)) {
                    try {
                        JSONObject pageObject = JSONObject.parseObject(downloadRecord.getInternalDownloadPage().getRawText());
                        if (pageObject.getIntValue("status") == 1 && "ok".equalsIgnoreCase(pageObject.getString("message"))) {
                            JSONArray objects = pageObject.getJSONArray("data");
                            if (objects.size() > 0) {
                                JSONObject viewsObj = objects.getJSONObject(0);
                                String views = viewsObj.getString("num");
                                extras.put("views", views);
                            } else {
                                extras.put("views", "0");
                            }
                        }
                    } catch (Exception e) {
                        extras.put("views", "0");
                    }

                }
                // Follows: author-info API -> extras["follows"].
                if (recordHttpRequestUrl.matches(followsUrlRegex)) {
                    try {
                        JSONObject pageObject = JSONObject.parseObject(downloadRecord.getInternalDownloadPage().getRawText());
                        if (pageObject.getIntValue("status") == 1 && "success".equalsIgnoreCase(pageObject.getString("message"))) {
                            JSONObject dataObj = pageObject.getJSONObject("data");
                            String follows = dataObj.getString("fansCount");
                            extras.put("follows", follows);
                        } else {
                            extras.put("follows", "0");
                        }
                    } catch (Exception e) {
                        extras.put("follows", "0");
                    }
                }
            }
        }
    }

    /**
     * Dispatches washing of a downloaded page to the article / interaction /
     * comment handlers, according to the result data types tagged on the
     * record; a record tagged with several types contributes to all of them.
     *
     * @param crawlerRequestRecord record whose result tags select the washers
     * @param httpPage             page to wash
     * @return combined crawler data from all applicable washers
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> washed = new ArrayList<>();
        boolean wantsArticle = crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article);
        boolean wantsInteraction = crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction);
        boolean wantsComment = crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment);
        if (wantsArticle) {
            washed.addAll(washArticle(crawlerRequestRecord, httpPage));
        }
        if (wantsInteraction) {
            washed.addAll(washInteraction(crawlerRequestRecord, httpPage));
        }
        if (wantsComment) {
            washed.addAll(washComment(crawlerRequestRecord, httpPage));
        }
        return washed;
    }

    /**
     * Washes an article detail page: extracts title, author, body text, image
     * URLs and publish time, then emits two records — the full article result
     * (kafka/file pipelines) and a slim dedup record (redis pipeline).
     *
     * @param crawlerRequestRecord request record; its extras carry "topicId" and "follows"
     * @param httpPage             HTML response of the article detail page
     * @return list containing the article CrawlerData and the redis CrawlerData
     */
    private List<CrawlerData> washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        Html pageHtml = httpPage.getHtml();
        String title = pageHtml.xpath("//h1[@class=\"news-detail-header\"]/text()").get();
        // FIX: xpath get() may return null when the node is missing; the original
        // would NPE inside unescapeHtml2J
        if (title != null) {
            title = unescapeHtml2J(title);
        }
        String author = pageHtml.xpath("//a[@class=\"news-detail-profile-active\"]/text()").get();
        String authorUrl = pageHtml.xpath("//a[@class=\"news-detail-profile-active\"]/@href").get();
        // FIX: guard the "/u<id>" extraction — the original split(...)[1] threw
        // ArrayIndexOutOfBounds when the href was missing or had no "/u" segment
        String authorId = "";
        if (StringUtils.isNotBlank(authorUrl) && authorUrl.contains("/u")) {
            authorId = authorUrl.split("/u")[1];
            if (authorId.endsWith("/")) {
                authorId = authorId.replace("/", "");
            }
        }
        List<String> allContents = pageHtml.xpath("//div[@class=\"news-detail-main motu_cont\"]//p//text()").all();
        // StringBuilder is sufficient here — no shared-thread access, so the
        // synchronized StringBuffer was unnecessary overhead
        StringBuilder content = new StringBuilder();
        for (String allContent : allContents) {
            content.append(allContent);
        }

        List<String> allImages = pageHtml.xpath("//div[@class=\"news-detail-main motu_cont\"]//p/img/@data-original").all();
        StringBuilder imageUrl = new StringBuilder();
        for (String allImage : allImages) {
            // literal "\x01" is the downstream image-URL separator — keep as-is
            imageUrl.append(allImage).append("\\x01");
        }
        // publish time lives in an inline JS assignment: publishTime: '....',
        String publishTime = "";
        Matcher matcher = Pattern.compile("publishTime:\\s*'\\S*\\s*\\S*',").matcher(httpPage.getRawText());
        while (matcher.find()) {
            publishTime = matcher.group(0).split("'")[1];
        }
        String contentId = (String) extras.get("topicId");
        long releaseTime;
        try {
            releaseTime = StringUtils.isBlank(publishTime) ? crawlerRequestRecord.getReleaseTime() : DateUtils.parseDate(publishTime, "yyyy-MM-dd HH:mm:ss").getTime();
        } catch (ParseException e) {
            // FIX: arguments were swapped — SLF4J expects the message pattern first
            logger.warn("parse date error: {}", e.getMessage());
            releaseTime = crawlerRequestRecord.getReleaseTime();
        }
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        String follows = (String) extras.get("follows");
        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .url(httpRequest.getUrl())
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), contentId))
                .releaseTime(releaseTime)
                .addContentKV(Field_Title, title)
                .addContentKV(Field_Content, unescapeHtml2J(content.toString()))
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Author_Id, authorId)
                .addContentKV(Field_Author_Follows, follows)
                .addContentKV(Field_Images, imageUrl.toString())
                .resultLabelTag(article)
                .flowInPipelineTag("kafka_result")
                .flowInPipelineTag("file_result")
                .build();
        crawlerData.setFilterPipelineResult(true);
        crawlerData.tagsCreator().bizTags().addSiteBiz("hao_article");
        // slim record flowing to redis (URL + releaseTime only), used for dedup
        CrawlerData crawlerRedisResultData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .url(httpRequest.getUrl())
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), contentId))
                .releaseTime(releaseTime)
                .addContentKV(Field_Urls, crawlerRequestRecord.getHttpRequest().getUrl())
                .addContentKV("releaseTime", String.valueOf(releaseTime))
                .resultLabelTag(article)
                .flowInPipelineTag("redis_result")
                .build();
        crawlerRedisResultData.setFilterPipelineResult(true);

        // car-series info mentioned on the page (id/name/url per series)
        List<Map<String, String>> seriesList = new ArrayList<>();
        List<Selectable> seriesNodes = pageHtml.xpath("//div[@class=\"tempWrap\"]/ul/li").nodes();
        if (null != seriesNodes && seriesNodes.size() > 0) {
            for (Selectable seriesNode : seriesNodes) {
                String seriesId = seriesNode.xpath("./a[@target]/@data-id").get();
                String seriesUrl = seriesNode.xpath("./a[@target]/@href").get();
                String seriesName = seriesNode.xpath("./a[@target]/h3/text()").get();
                Map<String, String> seriesInfo = new HashMap<>();
                seriesInfo.put("series_name", seriesName);
                seriesInfo.put("series_url", seriesUrl);
                seriesInfo.put("series_id", seriesId);
                seriesList.add(seriesInfo);
            }
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Series, seriesList);
            crawlerRedisResultData.tagsCreator().bizTags().addCustomKV(Tag_Field_Series, seriesList);
        }
        crawlerDataList.add(crawlerData);
        crawlerDataList.add(crawlerRedisResultData);
        return crawlerDataList;
    }

    /**
     * Washes interaction metrics. Handles two request shapes:
     * article detail pages (comments/likes/views for the article) and the
     * comment-list JSON API (like counts per comment).
     *
     * @param crawlerRequestRecord request record; extras carry "topicId"/"likes"/"views"
     * @param httpPage             HTML page or JSON API response
     * @return one interaction CrawlerData per metric holder found
     */
    private List<CrawlerData> washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = httpRequest.getUrl();
        Map<String, Object> extras = httpRequest.getExtras();
        Html pageHtml = httpPage.getHtml();
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (lastRequestUrl.matches(articleUrlRegex)) {
            String contentId = (String) extras.get("topicId");
            // FIX: xpath get() may return null — guard before trim() (the
            // original chained .get().trim() and could NPE outside the try)
            String comments = pageHtml.xpath("//li[@class=\"news-detail-position-pinglun\"]/a/text()").get();
            comments = comments == null ? "" : comments.trim();
            try {
                if (comments.contains("评论")) {
                    // placeholder label means there is no numeric count yet
                    comments = "0";
                }
                // validate numeric; non-numbers fall through to the catch
                // (FIX: the original stored the parse result in an unused local)
                Integer.parseInt(comments);
            } catch (Exception e) {
                comments = "0";
            }
            String likes = (String) extras.get("likes");
            String views = (String) extras.get("views");
            long releaseTime;
            String publishTime = "";
            Matcher matcher = Pattern.compile("publishTime:\\s*'\\S*\\s*\\S*',").matcher(httpPage.getRawText());
            while (matcher.find()) {
                publishTime = matcher.group(0).split("'")[1];
            }
            try {
                releaseTime = StringUtils.isBlank(publishTime) ? crawlerRequestRecord.getReleaseTime() : DateUtils.parseDate(publishTime, "yyyy-MM-dd HH:mm:ss").getTime();
            } catch (ParseException e) {
                // FIX: arguments were swapped — SLF4J expects the message pattern first
                logger.warn("parse date error: {}", e.getMessage());
                releaseTime = crawlerRequestRecord.getReleaseTime();
            }

            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(httpRequest.getUrl())
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), contentId))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), contentId))
                    .releaseTime(releaseTime)
                    .addContentKV(Field_I_Comments, comments)
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Views, views)
                    .resultLabelTag(interaction)
                    .flowInPipelineTag("file_result")
                    .flowInPipelineTag("kafka_result")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            crawlerData.tagsCreator().bizTags().addSiteBiz("hao_article");
            crawlerDataList.add(crawlerData);
        }
        if (lastRequestUrl.matches(commentUrlRegex)) {
            // parse {"result": {"list": [...]}} from the comment-list API
            JSONObject resultObj = JSONObject.parseObject(httpPage.getRawText()).getJSONObject("result");
            // FIX: guard against a missing "result"/"list" node — the original NPE'd
            JSONArray commentsList = resultObj == null ? null : resultObj.getJSONArray("list");
            if (commentsList == null) {
                return crawlerDataList;
            }
            for (Object commentItem : commentsList) {
                JSONObject commentObj = (JSONObject) commentItem;
                String contentId = commentObj.getString("id");
                String likes = commentObj.getString("likeCount");
                String publishTime = commentObj.getString("createTime");
                long releaseTime;
                try {
                    releaseTime = StringUtils.isBlank(publishTime) ? crawlerRequestRecord.getReleaseTime() : DateUtils.parseDate(publishTime, "yyyy-MM-dd HH:mm:ss").getTime();
                } catch (ParseException e) {
                    // FIX: arguments were swapped — SLF4J expects the message pattern first
                    logger.warn("parse date error: {}", e.getMessage());
                    releaseTime = crawlerRequestRecord.getReleaseTime();
                }
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .url(httpPage.getRequest().getUrl())
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), contentId))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), contentId))
                        .releaseTime(releaseTime)
                        .addContentKV(Field_I_Likes, likes)
                        .resultLabelTag(interaction)
                        .flowInPipelineTag("file_result")
                        .flowInPipelineTag("kafka_result")
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .build();
                crawlerData.tagsCreator().bizTags().addSiteBiz("hao_article");
                crawlerData.setFilterPipelineResult(true);
                crawlerDataList.add(crawlerData);
            }
        }

        return crawlerDataList;
    }

    /**
     * Washes the comment-list JSON API response into one comment CrawlerData
     * per entry in {@code result.list}, each parented to its article.
     *
     * @param crawlerRequestRecord request record; extras carry the article "topicId"
     * @param httpPage             JSON response of the comment-list API
     * @return one CrawlerData per comment (empty when the response has no list)
     */
    private List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String articleKey = (String) extras.get("topicId");
        // parse {"result": {"list": [...]}} from the raw JSON body
        JSONObject resultObj = JSONObject.parseObject(httpPage.getRawText()).getJSONObject("result");
        // FIX: guard against a missing "result"/"list" node — the original NPE'd
        JSONArray commentsList = resultObj == null ? null : resultObj.getJSONArray("list");
        if (commentsList == null) {
            return crawlerDataList;
        }
        for (Object commentItem : commentsList) {
            JSONObject commentObj = (JSONObject) commentItem;
            String contentId = commentObj.getString("id");
            String content = commentObj.getString("content");
            String author = commentObj.getString("showName");
            String authorId = commentObj.getString("userId");
            String floor = commentObj.getString("floor");

            String publishTime = commentObj.getString("createTime");
            long releaseTime;
            try {
                releaseTime = StringUtils.isBlank(publishTime) ? crawlerRequestRecord.getReleaseTime() : DateUtils.parseDate(publishTime, "yyyy-MM-dd HH:mm:ss").getTime();
            } catch (ParseException e) {
                // FIX: arguments were swapped — SLF4J expects the message pattern first
                logger.warn("parse date error: {}", e.getMessage());
                releaseTime = crawlerRequestRecord.getReleaseTime();
            }
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(httpRequest.getUrl())
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), contentId))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .releaseTime(releaseTime)
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Floor, floor)
                    .resultLabelTag(comment)
                    .flowInPipelineTag("file_result")
                    .flowInPipelineTag("kafka_result")
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .build();
            crawlerData.tagsCreator().bizTags().addSiteBiz("hao_article");
            crawlerData.setFilterPipelineResult(true);
            crawlerDataList.add(crawlerData);
        }
        return crawlerDataList;
    }

    // Lifecycle hook inherited from CrawlerCommonScript; intentionally a no-op —
    // this script performs no post-execution cleanup.
    @Override
    public void afterExecute(CrawlerRecordContext context) {

    }

    /**
     * Parses the query string of a URL into a key/value map.
     * Values are NOT URL-decoded here — callers receive them verbatim.
     *
     * @param url e.g. http://*.*.com?aa=11&amp;bb=22&amp;cc=33
     * @return parameter map, or {@code null} when the URL carries no query
     *         string (null kept for backward compatibility with callers)
     */
    private Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<String, Object>(0);
        String param = null;
        if (url.contains("?")) {
            // FIX: guard the index — "http://x.com?" used to throw
            // ArrayIndexOutOfBounds on split("\\?")[1]
            String[] parts = url.split("\\?");
            if (parts.length > 1) {
                param = parts[1];
            }
        }
        if (StringUtils.isBlank(param)) {
            return null;
        }
        String[] params = param.split("&");
        for (String s : params) {
            // FIX: limit the split to 2 so values containing '=' (base64,
            // nested URLs) are no longer silently dropped
            String[] p = s.split("=", 2);
            if (p.length == 2) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

    /** Pool of desktop browser User-Agent strings used to randomize requests. */
    private static List<String> agentList = new ArrayList<>();

    static {
        Collections.addAll(agentList,
                "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
                "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
                "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36",
                "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko",
                "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko",
                "Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0",
                "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)",
                "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
                "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)",
                "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)",
                "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)",
                "Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)",
                "Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)",
                "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2",
                "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.28.3 (KHTML, like Gecko) Version/3.2.3 ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/525.28.3",
                "Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16",
                "Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14",
                "Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14",
                "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14",
                "Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02",
                "Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00",
                "Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00",
                "Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00",
                "Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00",
                "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1",
                "Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0",
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0",
                "Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0",
                "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0",
                "Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0",
                "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.13 Safari/537.36",
                "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400");
    }

    /**
     * Picks a random User-Agent string from {@link #agentList}.
     * <p>
     * FIX: {@code RandomUtils.nextInt(startInclusive, endExclusive)} treats the
     * upper bound as EXCLUSIVE, so the original {@code size() - 1} bound could
     * never return the last entry of the list.
     */
    private static String getRandomUA() {
        return agentList.get(RandomUtils.nextInt(0, agentList.size()));
    }

    /**
     * Normalizes a human-readable publish-time string into epoch milliseconds.
     * Strips a "发布：" prefix, maps "刚刚" to roughly one minute ago, delegates
     * relative "...前" phrases to {@link #timeBefore(String)}, and parses bare
     * dates (yyyy-MM-dd) or full timestamps (yyyy-MM-dd HH:mm:ss).
     *
     * @param timeStr raw time text scraped from the page
     * @return epoch millis, or 0 when the format is unrecognized
     * @throws ParseException when a date-like string fails to parse
     */
    private long cleanTime(String timeStr) throws ParseException {
        String normalized = timeStr.contains("发布") ? timeStr.split("：")[1] : timeStr;
        if (normalized.contains("刚刚")) {
            // "just now": approximate as one minute before the current time
            return System.currentTimeMillis() - (1000 * 60);
        }
        if (normalized.contains("前")) {
            return timeBefore(normalized);
        }
        if (normalized.matches("\\d+-\\d+-\\d+")) {
            return DateUtils.parseDate(normalized, "yyyy-MM-dd").getTime();
        }
        if (normalized.contains("-")) {
            return DateUtils.parseDate(normalized, "yyyy-MM-dd HH:mm:ss").getTime();
        }
        return 0;
    }

    /**
     * Converts a relative Chinese time phrase ("3天前", "5分钟前", "2小时30分钟前",
     * "1周前", "2个月前", ...) into epoch milliseconds by subtracting the parsed
     * offset from the current time. A month is approximated as 31 days.
     *
     * @param timeStr relative-time phrase ending in "前"
     * @return epoch millis, or 0 when no known pattern matches
     */
    private long timeBefore(String timeStr) {
        long offset;
        if (timeStr.matches("\\d*天前")) {
            offset = Integer.parseInt(timeStr.split("天")[0]) * ONE_DAY;
        } else if (timeStr.matches("\\d*秒前")) {
            offset = Integer.parseInt(timeStr.split("秒")[0]) * ONE_SECOND;
        } else if (timeStr.matches("\\d*分钟前")) {
            offset = Integer.parseInt(timeStr.split("分钟")[0]) * ONE_MINUTE;
        } else if (timeStr.matches("\\d*分钟\\d*秒前")) {
            String[] parts = timeStr.split("分钟");
            offset = (Integer.parseInt(parts[0]) * ONE_MINUTE)
                    + (Integer.parseInt(parts[1].split("秒")[0]) * ONE_SECOND);
        } else if (timeStr.matches("\\d*小时前")) {
            offset = Integer.parseInt(timeStr.split("小时")[0]) * ONE_HOUR;
        } else if (timeStr.matches("\\d*小时\\d*分钟前")) {
            String[] parts = timeStr.split("小时");
            offset = (Integer.parseInt(parts[0]) * ONE_HOUR)
                    + (Integer.parseInt(parts[1].split("分钟")[0]) * ONE_MINUTE);
        } else if (timeStr.matches("\\d*周前")) {
            offset = Integer.parseInt(timeStr.split("周")[0]) * ONE_DAY * 7;
        } else if (timeStr.matches("\\d*个月前")) {
            // month approximated as 31 days
            offset = Integer.parseInt(timeStr.split("个")[0]) * ONE_DAY * 31;
        } else {
            return 0;
        }
        return System.currentTimeMillis() - offset;
    }

    /**
     * Returns a shallow copy of the given extras map so the caller can mutate
     * the copy without affecting the original request's extras.
     *
     * @param inExtras source map (not modified)
     * @return new independent HashMap holding the same entries
     */
    public static Map<String, Object> copyExtras(Map<String, Object> inExtras) {
        return new HashMap<>(inExtras);
    }

    /**
     * Repeatedly HTML-unescapes a string so nested entities such as
     * "&amp;amp;lt;" fully resolve; capped at 5 passes as a safety valve.
     *
     * @param str possibly HTML-escaped text; may be {@code null}
     * @return fully unescaped text, or {@code null} when the input was null
     */
    public static String unescapeHtml2J(String str) {
        // FIX: xpath extraction can hand us null — the original threw an NPE
        // from str.contains(...)
        if (str == null) {
            return null;
        }
        int times = 0;
        while (str.contains("&") && str.contains(";")) {
            String unescaped = StringEscapeUtils.unescapeHtml(str);
            // FIX: stop as soon as a pass changes nothing (e.g. a literal
            // "A & B;" that is not an entity) instead of spinning out the cap
            if (unescaped.equals(str)) {
                break;
            }
            str = unescaped;
            times++;
            if (times > 5) {
                break;
            }
        }
        return str;
    }

    /**
     * Reads a text file line by line, keeping every line whose trimmed length
     * exceeds 2 characters.
     * <p>
     * NOTE(review): FileReader uses the platform default charset, preserved
     * here for backward compatibility — confirm whether UTF-8 should be forced.
     *
     * @param path path of the file to read
     * @return kept lines in file order; empty list when the file is unreadable
     */
    public static List<String> getContent(String path) {
        List<String> list = new ArrayList<String>();
        // FIX: try-with-resources replaces the manual finally/close dance and
        // guarantees the reader closes even when readLine throws
        try (BufferedReader bufferedReader = new BufferedReader(new FileReader(path))) {
            String str;
            while ((str = bufferedReader.readLine()) != null) {
                if (str.trim().length() > 2) {
                    list.add(str);
                }
            }
        } catch (Exception e) {
            // FIX: pass the throwable so the stack trace is not lost
            logger.error("failed to read file: " + path, e);
        }
        return list;
    }

}
