package com.chance.cc.crawler.development.scripts.weibo.ads;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;

/**
 * Crawler script for Weibo user profile pages: crawls an author's profile card,
 * the user's feed posts (with sub-page pagination), and per-post interaction
 * counts (forwards / comments / likes), restricted to a configured date range.
 *
 * @author songding
 * @since 2021/9/28 11:04
 * @version 1.0
 **/
public class WeiboUserCrawlerScript extends CrawlerCommonScript {
    private static Logger log = LoggerFactory.getLogger(WeiboUserCrawlerScript.class);
    private static final String domain = "weibo";
    private static final String site = "user";


    // Base URL of a user's profile page; the userId is appended.
    private static final String userUrl = "https://weibo.com/u/";
    private static final String turnPage = "turnPage";// tag key: page-turn (pagination) counter

    // Cookies sent with every request; populated with the "SUB" login cookie in the static block below.
    private static final Map<String,String> cookieMap = new HashMap<>();
    private static final String cookie = "_2A25MVrUDDeRhGeNI7lYR-S_Fwz-IHXVvuNtLrDV8PUJbkNANLVmkkW1NSExA2JFf5T7pSaFeSBmHX3qvqAFPvWoG";
    // Query-string fragments for page 1 / first sub-page; assembled as params1 + params2 + userId + params3 + params4.

    private static final String params1 = "?pids=Pl_Official_MyProfileFeed__17&profile_ftype=1&is_all=1&ajaxpagelet=1&ajaxpagelet_v6=1";
    private static final String params2 = "&__ref=%2Fu%2F";
    private static final String params3 = "%3Fprofile_ftype%3D1%26is_all%3D1%23_0";
    private static final String params4 = "&_t=FM_%s";

    // Author profile-card (newcard) endpoint; placeholders: userId, current timestamp.
    private static final String userMessageUrl = "https://weibo.com/aj/v6/user/newcard?ajwvr=6&id=%s&type=0&refer_flag=1005055013_&callback=STK_%s24";

    // Page 1, later sub-pages: pagebar is the sub-page index (0 = second sub-page, 1 = third; two sub-pages per page).
    // page and pre_page carry the current page number.
    // Placeholders to fill: pagebar, userId, userId, page, pre_page, timestamp (__rnd).
    private static final String pagingUrl = "https://weibo.com/p/aj/v6/mblog/mbloglist?ajwvr=6&domain=100505&is_all=1&pagebar=%s&pl_name=Pl_Official_MyProfileFeed__17&id=100505%s&script_uri=/u/%s&feed_type=0&page=%s&pre_page=%s&domain_op=100505&__rnd=%s";

    // Pagination for page 2 and beyond: pagebar 0 = first sub-page, 1 = second (two sub-pages per page);
    // page and pre_page carry the current page number.
    // NOTE(review): script_uri here hard-codes uid 7384381122 instead of a placeholder — looks wrong if this
    // template is ever used; it is unused in this file, confirm before use.
    private static final String allPagingUrl = "https://weibo.com/p/aj/v6/mblog/mbloglist?ajwvr=6&domain=100505&is_search=0&visible=0&is_all=1&is_tag=0&profile_ftype=1&page=%s&pagebar=%s&pl_name=Pl_Official_MyProfileFeed__17&id=100505%s&script_uri=/u/7384381122&feed_type=0&pre_page=%s&domain_op=100505&__rnd=%s";


    // URL patterns this script handles: profile page, profile page with paging params,
    // sub-page list endpoint, and the profile-card endpoint.
    private static final String userRegulars = "https://weibo.com/u/\\S*";
    private static final String userTurnPageRegulars = "https://weibo.com/u/\\S*\\?pids=\\S*";
    private static final String pagingRegulars = "https://weibo.com/p/aj/v6/mblog/mbloglist\\S*";
    private static final String userMessageRegulars = "https://weibo.com/aj/v6/user/newcard\\S*";
    @Override
    public void initUrlRegulars() {
        // Register every URL pattern this script is willing to handle.
        String[] regulars = {userRegulars, userTurnPageRegulars, pagingRegulars, userMessageRegulars};
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Expands keyword support-records into per-user profile-page requests.
     * For each support record whose URL contains "keys" and whose JSON payload
     * reports success, one request per keyword (treated as a userId) is built.
     *
     * @param crawlerRequestRecord the originating request, linked as item page
     * @param supportSourceRecords support downloads carrying keyword lists; may be null
     * @return the generated profile-page requests (empty list when nothing applies)
     */
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> prepareLinks = new ArrayList<>();
        if (supportSourceRecords == null) {
            return prepareLinks;
        }
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (!url.contains("keys")) {
                continue;
            }
            HttpPage page = supportSourceRecord.getInternalDownloadPage();
            String msg = page.getJson().jsonPath($_type + ".msg").get();
            // Constant-first equals: msg may be null when the support download failed.
            if (!"success".equals(msg)) {
                continue;
            }
            List<String> all = page.getJson().jsonPath($_type + ".content").all();
            for (String data : all) {
                JSONObject jsonObject = JSONObject.parseObject(data);
                String keyword = jsonObject.getString("keyword");
                CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(userUrl + keyword)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                prepareLinks.add(requestRecord);
            }
        }
        return prepareLinks;
    }

    /**
     * Dispatches a downloaded page to the appropriate link-generation step,
     * or schedules a retry when the download failed.
     *
     * @param crawlerRecord the record that produced this page
     * @param page          the downloaded page
     * @return follow-up requests (profile card, feed pages, sub-pages)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<>();
        if (page.getStatusCode() != 200 || !page.isDownloadSuccess()) {
            // Failed download: re-queue it and skip washing this page.
            log.error("download error or page != 200  code={}", page.getStatusCode());
            this.recordAgainDownload(crawlerRecord, page, parseLinks);
            crawlerRecord.setNeedWashPage(false);
            return parseLinks;
        }
        String url = crawlerRecord.getHttpRequest().getUrl();
        if (url.matches(userRegulars) && !url.matches(userTurnPageRegulars)) {
            this.getUserRecord(crawlerRecord, page, parseLinks); // author profile-card request
            this.getListPage(crawlerRecord, page, parseLinks);   // page 1 / first sub-page request
        }
        if (url.matches(userTurnPageRegulars)) {
            this.getListPageTurn(crawlerRecord, page, parseLinks);
        }
        if (url.matches(pagingRegulars)) {
            // Validate the date range; when in range, generate the next sub-page request.
            this.judgeRecord(crawlerRecord, page, parseLinks);
        }
        return parseLinks;
    }

    /*
     * Builds the request for the author's profile card (newcard endpoint).
     */
    private void getUserRecord(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String url = crawlerRecord.getHttpRequest().getUrl();
        String userId = url.split("https://weibo.com/u/")[1];
        // The newcard endpoint requires a profile-page referer header.
        Map<String, String> headMap = new HashMap<>();
        headMap.put("referer", "https://weibo.com/p/100505" + userId + "?is_all=1");
        String cardUrl = String.format(userMessageUrl, userId, System.currentTimeMillis());
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(cardUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        record.getHttpRequest().setHeaders(headMap);
        record.getHttpRequest().setCookies(cookieMap);
        parseLinks.add(record);
    }

    /**
     * Checks whether the posts in a sub-page response fall inside the configured
     * date range; when they do, schedules either the next page's first sub-page
     * (if this was the last sub-page, pagebar == "1") or the next sub-page of
     * the current page.
     */
    private void judgeRecord(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String data = page.getJson().jsonPath($_type + ".data").get();
        page.setHtml(Html.create(data));
        List<Selectable> nodes = page.getHtml().xpath("/html/body/div[@action-data=\"cur_visible=0\"]").nodes();
        for (Selectable node : nodes) {
            // The publish-time anchor sits at slightly different positions depending on post layout.
            String time = node.xpath("./div[2]/div[4]/div[2]/a/text()").get();
            if (time == null) {
                time = node.xpath("./div[1]/div[4]/div[2]/a/text()").get();
            }
            if (time == null) {
                time = node.xpath("./div[2]/div[3]/div[2]/a/text()").get();
            }
            if (time == null) {
                continue;
            }
            Long releaseTime = this.getReleaseTime(time);
            if (!isDateRange(crawlerRecord, releaseTime)) {
                return; // out of range: stop paging deeper
            }
        }
        String pagebar = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("pagebar");
        // Constant-first equals: tolerate a missing pagebar tag instead of throwing NPE.
        if ("1".equals(pagebar)) { // last sub-page of the current page -> schedule the next page
            Integer pages = 0; // NOTE(review): stays 0 when the turnPage tag is missing — confirm intended
            if (crawlerRecord.tagsCreator().bizTags().hasKVTag(turnPage)) {
                KVTag kvTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(turnPage);
                pages = (Integer) kvTag.getVal();
                pages += 1;
            }
            String userId = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("userId");
            // params1 contains no format specifiers, so it is appended verbatim
            // (the original String.format(params1, pages) silently ignored its argument).
            String url = userUrl + userId + params1 + params2 + userId + params3
                    + String.format(params4, String.valueOf(System.currentTimeMillis()) + 530);
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .releaseTime(System.currentTimeMillis())
                    .httpUrl(url)
                    .copyBizTags()
                    .copyRequestTags()
                    .copyResultTags()
                    .build();
            record.getHttpRequest().setCookies(cookieMap);
            record.tagsCreator().bizTags().addCustomKV(turnPage, pages); // page counter
            parseLinks.add(record);
        } else {
            this.getPagingRecord(crawlerRecord, page, parseLinks);
        }
    }

    /*
     * Starting from page 1 / first sub-page: validate this sub-page's post
     * dates, then request the next sub-page.
     */
    private void getListPageTurn(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        // Only continue paging when every post on this sub-page is within the date range.
        if (!this.getPaging(crawlerRecord, page, parseLinks)) {
            return;
        }
        this.getPagingRecord(crawlerRecord, page, parseLinks);
    }
    /*
     * Builds the request for the next sub-page of the feed list.
     * pagebar toggles between "0" and "1" (two sub-pages per page) and the
     * page counter advances by one on every call.
     */
    private void getPagingRecord(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String nextPagebar;
        Integer nextPage;
        boolean hasPagingTags = crawlerRecord.tagsCreator().bizTags().hasKVTag("pagebar")
                && crawlerRecord.tagsCreator().bizTags().hasKVTag("page");
        if (hasPagingTags) {
            String currentPagebar = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("pagebar");
            nextPagebar = currentPagebar.equals("0") ? "1" : "0"; // toggle sub-page index
            KVTag pageTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("page");
            nextPage = (Integer) pageTag.getVal() + 1; // advance the page counter
        } else {
            // No paging tags yet: this is the very first sub-page request.
            nextPagebar = "0";
            nextPage = 1;
        }
        String userId = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("userId");
        String url = String.format(pagingUrl, nextPagebar, userId, userId, nextPage, nextPage, System.currentTimeMillis());
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .copyRequestTags()
                .build();
        record.getHttpRequest().setCookies(cookieMap);
        record.tagsCreator().bizTags().addCustomKV("pagebar", nextPagebar);
        record.tagsCreator().bizTags().addCustomKV("page", nextPage);
        parseLinks.add(record);
    }
    /*
     * Returns true when every post in this sub-page response lies inside the
     * configured date range, false as soon as one falls outside it.
     */
    private boolean getPaging(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        // Strip the JSONP wrapper (23 leading / 11 trailing characters) to expose the JSON payload.
        String rawText = page.getRawText();
        String json = rawText.substring(23, rawText.length() - 11);
        String html = JSONObject.parseObject(json).getString("html");
        page.setHtml(Html.create(html));
        List<Selectable> posts = page.getHtml().xpath("/html/body/div/div[@action-data=\"cur_visible=0\"]").nodes();
        for (Selectable post : posts) {
            String time = post.xpath("./div[1]/div[4]/div[2]/a/text()").get();
            if (!isDateRange(crawlerRecord, this.getReleaseTime(time))) {
                return false;
            }
        }
        return true;
    }

    /*
     * Builds the request for page 1 / first sub-page of the user's feed.
     */
    private void getListPage(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String baseUrl = crawlerRecord.getHttpRequest().getUrl();
        Integer pages = 1;
        String userId = baseUrl.substring(userUrl.length()); // everything after "https://weibo.com/u/"
        // params1 has no format specifiers; the extra argument is ignored by String.format.
        String url = baseUrl + String.format(params1, pages) + params2 + userId + params3
                + String.format(params4, String.valueOf(System.currentTimeMillis()) + 530);
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .releaseTime(System.currentTimeMillis())
                .httpUrl(url)
                .copyBizTags()
                .copyResultTags()
                .build();
        record.tagsCreator().bizTags().addCustomKV("userId", userId);
        record.tagsCreator().bizTags().addCustomKV(turnPage, pages); // page counter
        record.getHttpRequest().setCookies(cookieMap);
        parseLinks.add(record);
    }

    /**
     * Dispatches a downloaded page to the matching wash step based on its URL.
     * The three patterns are mutually exclusive (different path prefixes), so
     * an else-if chain is equivalent to independent checks.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> dataList = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        if (url.matches(userTurnPageRegulars)) {
            // Page 1 / first sub-page feed content.
            this.washListPageTurn(crawlerRecord, page, dataList);
        } else if (url.matches(pagingRegulars)) {
            // Later sub-pages of the feed list.
            this.washPaging(crawlerRecord, page, dataList);
        } else if (url.matches(userMessageRegulars)) {
            // Author profile card.
            this.washUserMessage(crawlerRecord, page, dataList);
        }
        return dataList;
    }

    /**
     * Washes the author profile card (JSONP response) into one CrawlerData
     * record with content, author name, follower/following counts and post count.
     */
    private void washUserMessage(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        // Strip the STK_... JSONP wrapper (24 leading / 13 trailing characters).
        String rawText = page.getRawText();
        String json = rawText.substring(24, rawText.length() - 13);
        JSONObject jsonObject = JSONObject.parseObject(json);
        page.setHtml(Html.create(jsonObject.getString("data")));
        String content = page.getHtml().xpath("/html/body/div/div[1]/div[2]/div[2]/span/text()").get();
        String attention = page.getHtml().xpath("/html/body/div/div[2]/div[1]/span[1]/a/em/text()").get();
        String follows = page.getHtml().xpath("/html/body/div/div[2]/div[1]/span[2]/a/em/text()").get();
        String invitation = page.getHtml().xpath("/html/body/div/div[2]/div[1]/span[3]/a/em/text()").get();
        String author = page.getHtml().xpath("/html/body/div/div[1]/div[2]/div[1]/a/text()").get();
        // Expand "N万" (N * 10,000) into a plain number. Null-guarded: the xpath may miss.
        // NOTE(review): dropping the last 3 chars only round-trips values shaped like "3.5万";
        // a value such as "120万" would be truncated incorrectly — confirm the input format.
        if (follows != null && follows.contains("万")) {
            follows = follows.substring(0, follows.length() - 3) + "0000";
        }

        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRecord, page)
                .url(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(System.currentTimeMillis())
                .addContentKV(AICCommonField.Field_Content, content)
                .addContentKV(AICCommonField.Field_Author, author)
                .addContentKV(AutoVMCommonField.Field_Author_Follows, follows)
                .addContentKV(AICCommonField.Field_Post_Plished_Number, invitation)
                .addContentKV(AICCommonField.Field_Attention_Number, attention)
                .build();
        dataList.add(crawlerData);
    }

    /**
     * Washes posts from a sub-page response: for every post still inside the
     * date range, emits one article record (author + text) and one interaction
     * record (forwards / comments / likes) sharing a random id suffix.
     */
    private void washPaging(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        Random random = new Random(); // hoisted: one generator for the whole page instead of one per post
        List<Selectable> nodes = page.getHtml().xpath("/html/body/div[@action-data=\"cur_visible=0\"]").nodes();
        for (Selectable node : nodes) {
            // The publish-time anchor sits at slightly different positions depending on post layout.
            String time = node.xpath("./div[2]/div[4]/div[2]/a/text()").get();
            if (time == null) {
                time = node.xpath("./div[1]/div[4]/div[2]/a/text()").get();
            }
            if (time == null) {
                time = node.xpath("./div[2]/div[3]/div[2]/a/text()").get();
            }
            if (time == null) {
                continue;
            }
            Long releaseTime = this.getReleaseTime(time);
            if (!isDateRange(crawlerRecord, releaseTime)) {
                return; // stop washing once a post falls outside the range
            }
            // Post text may be split across several text nodes; concatenate them.
            StringBuilder content = new StringBuilder();
            for (String part : node.xpath("./div[1]/div[4]/div[4]/text()").all()) {
                content.append(part);
            }
            String author = node.xpath("./div[1]/div[4]/div[1]/a/text()").get();
            String forwards = node.xpath(".//ul[@class=\"clearfix\"]/li[1]/span/a/span/em[2]/text()").get();// forwards count
            String comments = node.xpath(".//ul[@class=\"clearfix\"]/li[2]/span/a/span/em[2]/text()").get();// comments count
            String likes = node.xpath(".//ul[@class=\"clearfix\"]/li[3]/span/a/span/em[2]/text()").get();// likes count
            if (StringUtils.isBlank(forwards)) {
                forwards = "0";
            }
            if (StringUtils.isBlank(comments)) {
                comments = "0";
            }
            if (StringUtils.isBlank(likes)) {
                likes = "0";
            }
            int i = random.nextInt(100000000);
            CrawlerData crawlerArticle = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, i))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(releaseTime)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .addContentKV(AICCommonField.Field_Author, author)
                    .addContentKV(AICCommonField.Field_Content, content.toString())
                    .build();
            dataList.add(crawlerArticle);
            CrawlerData crawlerInteraction = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction, i))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(releaseTime)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                    .addContentKV(AICCommonField.Field_I_Forwards, forwards)
                    .addContentKV(AICCommonField.Field_I_Comments, comments)
                    .addContentKV(AICCommonField.Field_I_Likes, likes)
                    .build();
            dataList.add(crawlerInteraction);
        }
    }

    /*
     * Washes posts from the page 1 / first sub-page response: for every post
     * inside the date range, emits one article record and one interaction
     * record sharing a random id suffix.
     */
    private void washListPageTurn(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        Random random = new Random(); // hoisted: one generator for the whole page instead of one per post
        List<Selectable> nodes = page.getHtml().xpath("/html/body/div/div[@action-data=\"cur_visible=0\"]").nodes();
        for (Selectable node : nodes) {
            String time = node.xpath("./div[1]/div[4]/div[2]/a/text()").get();
            Long releaseTime = this.getReleaseTime(time);
            if (!isDateRange(crawlerRecord, releaseTime)) {
                return; // stop washing once a post falls outside the range
            }
            // Post text may be split across several text nodes; concatenate them.
            StringBuilder content = new StringBuilder();
            for (String part : node.xpath("./div[1]/div[4]/div[4]/text()").all()) {
                content.append(part);
            }
            String author = node.xpath("./div[1]/div[4]/div[1]/a/text()").get();
            String forwards = node.xpath(".//ul[@class=\"clearfix\"]/li[1]/span/a/span/em[2]/text()").get();// forwards count
            String comments = node.xpath(".//ul[@class=\"clearfix\"]/li[2]/span/a/span/em[2]/text()").get();// comments count
            String likes = node.xpath(".//ul[@class=\"clearfix\"]/li[3]/span/a/span/em[2]/text()").get();// likes count
            if (StringUtils.isBlank(forwards)) {
                forwards = "0";
            }
            if (StringUtils.isBlank(comments)) {
                comments = "0";
            }
            if (StringUtils.isBlank(likes)) {
                likes = "0";
            }
            int i = random.nextInt(100000000);
            CrawlerData crawlerArticle = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, i))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(releaseTime)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .addContentKV(AICCommonField.Field_Author, author)
                    .addContentKV(AICCommonField.Field_Content, content.toString())
                    .build();
            dataList.add(crawlerArticle);
            CrawlerData crawlerInteraction = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction, i))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(releaseTime)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                    .addContentKV(AICCommonField.Field_I_Forwards, forwards)
                    .addContentKV(AICCommonField.Field_I_Comments, comments)
                    .addContentKV(AICCommonField.Field_I_Likes, likes)
                    .build();
            dataList.add(crawlerInteraction);
        }
    }


    /**
     * Re-queues a failed download. The attempt number is tracked in the "count"
     * biz tag; after 10 attempts the URL is abandoned with an error log.
     */
    private void recordAgainDownload(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        int retryCount;
        String count = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("count");
        if (count == null) {
            retryCount = 1; // first retry
        } else {
            retryCount = Integer.parseInt(count);
            if (retryCount >= 10) {
                log.error("The number of downloads exceeds the limit");
                return;
            }
            retryCount += 1;
        }
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        // Distinct record key per attempt so the retry is not deduplicated away.
        record.setRecordKey(crawlerRecord.getHttpRequest().getUrl() + retryCount);
        record.tagsCreator().bizTags().addCustomKV("count", retryCount);
        parseLinks.add(record);
    }

    /**
     * Accepts only records tagged with this script's site ("user").
     * Constant-first equals avoids an NPE when the record carries no site tag.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        return site.equals(crawlerRecord.tagsCreator().bizTags().site());
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // Intentionally empty: this script needs no post-execution cleanup.
    }

    /** Returns the crawler domain identifier ("weibo"). */
    @Override
    public String domain() {
        return domain;
    }
    // Attach the "SUB" login cookie used on every outgoing request.
    static {
        cookieMap.put("SUB",cookie);
    }
    /**
     * Checks whether a post's publish time lies inside the record's configured
     * date-range filter.
     *
     * @param crawlerRequestRecord record carrying the filter configuration
     * @param releaseTimeToLong    publish time in epoch milliseconds
     * @return true when no date filter applies, or when the time falls inside
     *         the explicit [start, end] range / the last-N-hours window
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord,Long releaseTimeToLong){
        boolean isRange = false;
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        if (filter == CrawlerEnum.CrawlerRecordFilter.keyOrDateRange || filter == CrawlerEnum.CrawlerRecordFilter.dateRange) {
            List<FilterInfo> filterInfos = crawlerRequestRecord.getFilterInfos();
            Long startTime = null;
            Long endTime = null;
            for (FilterInfo filterInfo : filterInfos) {
                if (filterInfo.getFilter() == CrawlerEnum.CrawlerRecordFilter.dateRange) {
                    long[] dateAllowRange = filterInfo.getDateAllowRange();
                    int hourFromNow = filterInfo.getHourFromNow();
                    if (dateAllowRange != null) {
                        // An explicit [start, end] range takes precedence over hourFromNow.
                        startTime = dateAllowRange[0];
                        endTime = dateAllowRange[1];
                    }else if(hourFromNow != 0){
                        endTime = System.currentTimeMillis()-60000;// system time minus one minute
                        startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
                    }
                }
            }
            if(startTime != null && releaseTimeToLong != 0 && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime){
                isRange = true;
            }
        }else{
            // No date filter configured: everything is in range.
            isRange = true;
        }
        return isRange;
    }


    /*
     * Parses a Weibo display time ("N秒前", "N分钟前", "今天 HH:mm",
     * "MM月dd日 HH:mm") into epoch milliseconds.
     * A null input yields "now"; an unrecognized format yields 0L.
     * SimpleDateFormat instances are created per call on purpose: the class is
     * not thread-safe, so they must not be shared as static fields.
     */
    private Long getReleaseTime(String time) {
        if (time == null) {
            // No timestamp found: assume "now" so the post is not filtered out.
            return System.currentTimeMillis();
        }
        Long releaseTime = 0L;
        if (time.contains("秒")) {
            // "N seconds ago": approximate as one minute ago.
            return System.currentTimeMillis() - 60000;
        }
        if (time.contains("分钟前")) {
            // "N minutes ago": approximated as midnight today (minute precision discarded,
            // matching the original behavior; the input string itself is not used further).
            String today = new SimpleDateFormat("yyyy-MM-dd").format(new Date());
            try {
                releaseTime = DateUtils.parseDate(today, "yyyy-MM-dd").getTime();
            } catch (ParseException e) {
                log.error("get time error");
            }
            return releaseTime;
        } else if (time.contains("今天")) {
            // "今天 HH:mm": drop the leading "今天" and prepend today's date.
            time = time.trim();
            String todayPrefix = new SimpleDateFormat("yyyy-MM-dd ").format(new Date());
            time = todayPrefix + time.substring(2);
            try {
                releaseTime = DateUtils.parseDate(time, "yyyy-MM-dd HH:mm").getTime();
            } catch (ParseException e) {
                log.error("get time error");
            }
            return releaseTime;
        } else if (time.contains("月")) {
            // "MM月dd日 HH:mm": prepend the current year.
            String year = new SimpleDateFormat("yyyy").format(new Date());
            time = year + "年" + time;
            try {
                releaseTime = DateUtils.parseDate(time, "yyyy年MM月dd日 HH:mm").getTime();
            } catch (ParseException e) {
                log.error("get time error");
            }
            return releaseTime;
        }

        return releaseTime;
    }
}
