package com.chance.cc.crawler.development.scripts.weibo.api;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.tags.CategoryTag;
import org.apache.commons.lang3.StringUtils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;

/**
 * @author lt
 * @version 1.0
 * @date 2021-03-15 10:31:32
 * @email okprog@sina.com
 */
public class WeiboKeywordsCrawlerScript extends CrawlerCommonScript {

    private static Logger logger = LoggerFactory.getLogger(WeiboKeywordsCrawlerScript.class);

    /*
    Hourly download rate-limit state (see checkDownloadTimes).
    The atomics are always mutated together while holding `lock`.
     */
    private AtomicInteger downloadTimes = new AtomicInteger(0);
    private AtomicLong beginTime = new AtomicLong(0L);
    private AtomicLong endTime = new AtomicLong(0L);
    private final Object lock = new Object();

    // URL patterns recognized by this script (registered in initUrlRegulars,
    // matched again in parseLinks to dispatch page parsing).
    public static final String indexRegex = "https://www\\.weibo\\.com";
    public static final String keysRegex = "https?://\\S*v1/meta/weibo/keys\\S*";
    public static final String searchKWUrlRegex = "https://c\\.api\\.weibo\\.com/2/search/statuses/limited\\.json\\S*";
    public static final String searchAuthorUrlRegex = "https://c\\.api\\.weibo\\.com/2/statuses/user_timeline_batch\\.json\\S*";
    public static final String searchMidUrlRegex = "https://c\\.api\\.weibo\\.com/2/statuses/show_batch/biz\\.json\\S*";

    // Format strings for building API request URLs; %s is the query string
    // produced by asUrlParams.
    public static final String searchKWUrlFormat = "https://c.api.weibo.com/2/search/statuses/limited.json?%s";
    public static final String searchAuthorUrlFormat = "https://c.api.weibo.com/2/statuses/user_timeline_batch.json?%s";
    public static final String searchMidUrlFormat = "https://c.api.weibo.com/2/statuses/show_batch/biz.json?%s";
    public static final String interactionUrlFormat = "https://c.api.weibo.com/2/statuses/count/biz.json?%s";
    public static final String authorAgeUrlFormat = "https://c.api.weibo.com/2/users/birthday/other.json?%s";
    public static final String authorTagsUrlFormat = "https://c.api.weibo.com/2/tags/tags_batch/other.json?%s";
    /*
    Shared API token parameter.
    NOTE(review): hard-coded access token — consider moving to configuration.
     */
    private static final String ACCESS_TOKEN_KEY = "access_token";
    private static final String ACCESS_TOKEN = "2.00o4_w1HrAaeYBedf38e38b8SnITmD";
    /*
    Keyword-search API parameters.
     */
    private static final String Q_KEY = "q";
    private static final String PAGE_KEY = "page";
    private static final String COUNT_KEY = "count";
    private static final String END_TIME_KEY = "endtime";
    private static final String START_TIME_KEY = "starttime";
    private static final String DUP_KEY = "dup";
    private static final String ANTISPAM_KEY = "antispam";
    /*
    User-info API parameter.
     */
    private static final String UIDS_KEY = "uids";
    /*
    Age (birthday) API parameter.
     */
    private static final String UID_KEY = "uid";
    /*
    Interaction-count API parameters.
     */
    private static final String IDS_KEY = "ids";// shared by the detail APIs
    private static final String EXCLUDE_COMMENT_LIKE_KEY = "exclude_comment_like";
    /*
    Weibo detail API: request long (full) text.
     */
    private static final String IS_GET_LONG_TEXT_KEY = "isGetLongText";
    /*
    Comment API parameter.
     */
    private static final String ID_KEY = "id";

    // Output-routing tag keys set on inner-download records and checked again
    // in afterInternalDownload / washPage.
    public static final String IS_OUTPUT_Age = "is_output_age";
    public static final String IS_OUTPUT_Tags = "is_output_Tags";
    public static final String IS_OUTPUT_Interaction = "is_output_Interaction";

    // Site id this script claims (see crawlerCheck).
    private static final String scriptSite = "apikw";

    // Source-record URL suffixes that decide how each keyword entry is
    // interpreted in prepareRequest: search keyword, author uid, or weibo mid.
    public static final String kwSite = "medical_keyword";
    public static final String authorSite = "medical_weibo_uid";
    public static final String midSite = "medical_weibo_mid";

    /**
     * Pre-request hook: expands a keyword/uid/mid source record into one new
     * request record per entry of its JSON "content" array.
     *
     * @param requestRecord last record
     * @param supportSourceRecords source-data service records; only the first
     *        one is consulted and must match {@link #keysRegex}
     * @return new records; falls back to the superclass behavior when there is
     *         no usable source record or nothing could be built
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> allItemRecords = new ArrayList<>();
        if (supportSourceRecords == null || supportSourceRecords.size() <1){
            return super.prepareRequest(requestRecord,supportSourceRecords);
        }
        CrawlerRequestRecord keywordRecord = supportSourceRecords.get(0);
        String keywordUrl = keywordRecord.getHttpRequest().getUrl();
        if (keywordUrl.matches(keysRegex)){
            try {
                JSONObject jsonObject = JSONObject.parseObject(keywordRecord.getInternalDownloadPage().getRawText());
                // status == 0 marks a successful meta-keys response.
                if (jsonObject.getIntValue("status") == 0){
                    // Reset the hourly download counter/window for this run.
                    initCheckDownloadRTimes();
                    JSONArray objects = jsonObject.getJSONArray("content");
                    for (Object object : objects) {
                        String keyword = ((JSONObject)object).getString("keyword");
                        CrawlerRequestRecord crawlerRequestRecord = null;
                        // The source-URL suffix decides how "keyword" is
                        // interpreted: search keyword, author uid, or mid.
                        if (keywordUrl.endsWith(kwSite)){
                            crawlerRequestRecord = getSearchKwListRecord(requestRecord, keyword);
                        }
                        if (keywordUrl.endsWith(authorSite)){
                            crawlerRequestRecord = getSearchAuthorUrlRecord(requestRecord, keyword);
                        }
                        if (keywordUrl.endsWith(midSite)){
                            crawlerRequestRecord = getSearchMidUrlRecord(requestRecord, keyword);
                        }
                        if (null != crawlerRequestRecord){
                            crawlerRequestRecord.tagsCreator().bizTags().addKeywords(keyword);
                            // Only emit the record while under the hourly quota.
                            if (checkDownloadTimes()){
                                allItemRecords.add(crawlerRequestRecord);
                            }
                        }
                    }
                }
            }catch (Exception e){
                logger.error(e.getMessage(),e);
            }
        }
        if (allItemRecords.isEmpty()){
            return super.prepareRequest(requestRecord,supportSourceRecords);
        }
        return allItemRecords;
    }

    /**
     * Builds an item-page request against the statuses/show_batch detail API
     * for a single mid (weibo id), tagged to output both the article and its
     * interaction counts.
     *
     * @param requestRecord the record this request derives from
     * @param keyword the mid to fetch
     * @return detail-page request record
     */
    private CrawlerRequestRecord getSearchMidUrlRecord(CrawlerRequestRecord requestRecord, String keyword) {
        Map<String, Object> query = new HashMap<>();
        query.put(IDS_KEY, keyword);
        query.put(IS_GET_LONG_TEXT_KEY, "1");
        query.put(ACCESS_TOKEN_KEY, ACCESS_TOKEN);
        String detailUrl = String.format(searchMidUrlFormat, asUrlParams(query));
        return CrawlerRequestRecord.builder()
                .itemPageRequest(requestRecord)
                .httpUrl(detailUrl)
                .recordKey(detailUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .notFilterRecord()
                .resultLabelTag(article)
                .resultLabelTag(interaction)
                .needParsed(true)
                .needWashed(true)
                .build();
    }

    /**
     * Builds a first-page user-timeline request for one author uid against the
     * user_timeline_batch API.
     *
     * @param requestRecord the record this request derives from
     * @param keyword the author uid
     * @return turn-page request record for page 1 of the author timeline
     */
    private CrawlerRequestRecord getSearchAuthorUrlRecord(CrawlerRequestRecord requestRecord, String keyword){
        Map<String, Object> query = new HashMap<>();
        query.put(ACCESS_TOKEN_KEY, ACCESS_TOKEN);
        query.put(UIDS_KEY, keyword);
        query.put(PAGE_KEY, "1");
        query.put(COUNT_KEY, "20");
        String timelineUrl = String.format(searchAuthorUrlFormat, asUrlParams(query));
        return CrawlerRequestRecord.builder()
                .turnPageRequest(requestRecord)
                .recordKey(timelineUrl)
                .httpUrl(timelineUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .notFilterRecord()
                .build();
    }

    /**
     * Builds a first-page keyword-search request against the
     * search/statuses/limited API for one keyword. The search window ends at
     * "now" (second precision); no explicit start time is set.
     *
     * @param requestRecord the record this request derives from
     * @param keyword search keyword
     * @return turn-page request record for page 1 of the keyword search
     */
    private CrawlerRequestRecord getSearchKwListRecord(CrawlerRequestRecord requestRecord, String keyword) {
        Map<String, Object> query = new HashMap<>();
        query.put(ACCESS_TOKEN_KEY, ACCESS_TOKEN);
        query.put(Q_KEY, keyword);
        query.put(PAGE_KEY, "1");
        query.put(COUNT_KEY, "20");
        query.put(DUP_KEY, "0");
        query.put(ANTISPAM_KEY, "0");
        query.put(END_TIME_KEY, getSecondTimestamp(System.currentTimeMillis()));
        String searchUrl = String.format(searchKWUrlFormat, asUrlParams(query));
        return CrawlerRequestRecord.builder()
                .turnPageRequest(requestRecord)
                .recordKey(searchUrl)
                .httpUrl(searchUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .notFilterRecord()
                .build();
    }


    /**
     * Dispatches a downloaded page to the matching link parser.
     *
     * @param crawlerRequestRecord the record whose page was downloaded
     * @param httpPage the downloaded page
     * @return follow-up request records; never null — empty when the page is a
     *         terminal error (404/403) or matches no known URL pattern
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        // 404/403 are treated as terminal for this record: drop it, no retry.
        if (404 == httpPage.getStatusCode() || 403 == httpPage.getStatusCode()){
            logger.info("status code is {}",httpPage.getStatusCode());
            logger.info("download times is {}",downloadTimes);
            return parsedLinks;
        }
        if (doHttpPageCheck(crawlerRequestRecord,httpPage)){
            // Page is incomplete/invalid: re-enqueue the same record for a
            // retry, subject to the hourly download quota.
            if (checkDownloadTimes()){
                parsedLinks.add(crawlerRequestRecord);
            }
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            return parsedLinks;
        }
        String lastRequestUrl = lastRequest.getUrl();
        if (lastRequestUrl.matches(searchKWUrlRegex) || lastRequestUrl.matches(searchAuthorUrlRegex)){
            return parseSearchListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(searchMidUrlRegex)){
            return parseSearchMidLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        // FIX: previously returned null for unrecognized URLs; return the
        // empty list instead so callers can iterate without a null check.
        return parsedLinks;
    }

    /**
     * Parses a weibo-detail (show_batch) page: extracts the post's release
     * time, then — depending on the schedule tags — enqueues internal-download
     * requests for interaction counts, author age, and author tags.
     * The body may come from the HTTP page or, for list-derived records, from
     * the pre-filled internal download page.
     *
     * @param crawlerRequestRecord the detail-page record (mutated: release
     *        time, wash flag)
     * @param httpPage downloaded page (raw text may be blank for list-derived
     *        records)
     * @param parsedLinks accumulator the new records are appended to
     * @return parsedLinks with any quota-permitted inner-download records added
     */
    private List<CrawlerRequestRecord> parseSearchMidLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        crawlerRequestRecord.setNeedWashPage(true);
        String rawText = "";
        JSONObject dataObj = null;
        // Blank raw text means the body was pre-filled by parseSearchListLinks
        // via setInternalDownloadPage; use that copy instead.
        if (StringUtils.isBlank(httpPage.getRawText())){
            rawText = crawlerRequestRecord.getInternalDownloadPage().getRawText();
            dataObj = JSONObject.parseObject(rawText);
        }else {
            rawText = httpPage.getRawText();
            dataObj = JSONObject.parseObject(rawText);
        }

        String createdTime = dataObj.getString("created_at");
        String idStr = dataObj.getString("idstr");
        // NOTE(review): assumes the "user" object is always present — a post
        // without it would NPE here; confirm against the API contract.
        String uid = dataObj.getJSONObject("user").getString("idstr");
        try {
            long releaseTime = parseGMTTime(createdTime);
            crawlerRequestRecord.setReleaseTime(releaseTime);
        } catch (ParseException e) {
            // Release time stays at its previous value when parsing fails.
            logger.error("parse GMT time error, this created time is : {}", createdTime);
        }

        // Schedule tags select which inner downloads to generate:
        // interaction counts, author age, author tags.
        CategoryTag scheduleCategoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
        if (scheduleCategoryTag.isContainKVTag(IS_OUTPUT_Interaction)){
            CrawlerRequestRecord interactionRecord = getInteractionRecord(crawlerRequestRecord, idStr);
            if (checkDownloadTimes()){
                parsedLinks.add(interactionRecord);
            }
        }
        if (scheduleCategoryTag.isContainKVTag(IS_OUTPUT_Age)){
            CrawlerRequestRecord userAgeRecord = getUserAgeRecord(crawlerRequestRecord, uid);
            if (checkDownloadTimes()){
                parsedLinks.add(userAgeRecord);
            }
        }
        if (scheduleCategoryTag.isContainKVTag(IS_OUTPUT_Tags)){
            CrawlerRequestRecord userTagsRecord = getUserTagsRecord(crawlerRequestRecord, uid);
            if (checkDownloadTimes()){
                parsedLinks.add(userTagsRecord);
            }
        }
        return parsedLinks;
    }

    /**
     * Builds an internal-download request for the tags_batch API of one user,
     * tagged with IS_OUTPUT_Tags so afterInternalDownload can route its result.
     *
     * @param crawlerRequestRecord the parent detail-page record
     * @param uid author uid
     * @return inner-download request record
     */
    private CrawlerRequestRecord getUserTagsRecord(CrawlerRequestRecord crawlerRequestRecord, String uid) {
        Map<String, Object> query = new HashMap<>();
        query.put(ACCESS_TOKEN_KEY, ACCESS_TOKEN);
        query.put(UIDS_KEY, uid);
        String tagsUrl = String.format(authorTagsUrlFormat, asUrlParams(query));
        CrawlerRequestRecord tagsRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(tagsUrl)
                .recordKey(tagsUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        tagsRecord.tagsCreator().bizTags().getCategoryTag().addKVTag(IS_OUTPUT_Tags, true);
        return tagsRecord;
    }

    /**
     * Builds an internal-download request for the birthday API of one user,
     * tagged with IS_OUTPUT_Age so afterInternalDownload can route its result.
     *
     * @param crawlerRequestRecord the parent detail-page record
     * @param uid author uid
     * @return inner-download request record
     */
    private CrawlerRequestRecord getUserAgeRecord(CrawlerRequestRecord crawlerRequestRecord, String uid) {
        Map<String, Object> query = new HashMap<>();
        query.put(ACCESS_TOKEN_KEY, ACCESS_TOKEN);
        query.put(UID_KEY, uid);
        String ageUrl = String.format(authorAgeUrlFormat, asUrlParams(query));
        CrawlerRequestRecord ageRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(ageUrl)
                .recordKey(ageUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        ageRecord.tagsCreator().bizTags().getCategoryTag().addKVTag(IS_OUTPUT_Age, true);
        return ageRecord;
    }

    /**
     * Builds an internal-download request for the interaction-count API of one
     * post, and marks the parent record so washPage will emit an interaction
     * data item alongside the article.
     *
     * @param crawlerRequestRecord the parent detail-page record (its result
     *        tags gain the interaction data type)
     * @param idStr the post's mid
     * @return inner-download request record tagged with IS_OUTPUT_Interaction
     */
    private CrawlerRequestRecord getInteractionRecord(CrawlerRequestRecord crawlerRequestRecord, String idStr) {
        crawlerRequestRecord.tagsCreator().resultTags().addResultDataType(interaction);
        Map<String, Object> query = new HashMap<>();
        query.put(ACCESS_TOKEN_KEY, ACCESS_TOKEN);
        query.put(IDS_KEY, idStr);
        query.put(EXCLUDE_COMMENT_LIKE_KEY, "1");
        String countUrl = String.format(interactionUrlFormat, asUrlParams(query));
        CrawlerRequestRecord countRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(countUrl)
                .recordKey(countUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        countRecord.tagsCreator().bizTags().getCategoryTag().addKVTag(IS_OUTPUT_Interaction, true);
        return countRecord;
    }

    /**
     * Parses a keyword-search or author-timeline list page: when the page has
     * statuses, enqueues the next list page (page+1) and one detail record per
     * status. Detail records carry the status JSON as a pre-filled internal
     * download page, so no extra HTTP fetch is needed for them.
     *
     * @param crawlerRequestRecord the list-page record
     * @param httpPage downloaded list page
     * @param parsedLinks accumulator the new records are appended to
     * @return parsedLinks with quota-permitted list/detail records added
     */
    private List<CrawlerRequestRecord> parseSearchListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        JSONObject pageObject = JSONObject.parseObject(httpPage.getRawText());
        JSONArray statusesArray = pageObject.getJSONArray("statuses");
        if (null != statusesArray && statusesArray.size() > 0){
            Map<String, Object> urlParams = getUrlParams(lastRequestUrl);
            if (null != urlParams){
                int page = Integer.parseInt((String) urlParams.get(PAGE_KEY));
                // NOTE(review): one-arg URLDecoder.decode is deprecated and
                // uses the platform default charset — should pass "UTF-8".
                String keyword = URLDecoder.decode((String) urlParams.get("q"));
                page += 1;
                urlParams.put(PAGE_KEY,String.valueOf(page));
                // Re-store the decoded keyword; asUrlParams re-encodes it.
                urlParams.put(Q_KEY,keyword);
                String listUrl = "";
                if (lastRequestUrl.matches(searchKWUrlRegex)){
                    listUrl = String.format(searchKWUrlFormat,asUrlParams(urlParams));
                }
                if (lastRequestUrl.matches(searchAuthorUrlRegex)){
                    listUrl = String.format(searchAuthorUrlFormat,asUrlParams(urlParams));
                }
                CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .recordKey(listUrl)
                        .httpUrl(listUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .build();
                if (checkDownloadTimes()){
                    parsedLinks.add(listRecord);
                }
            }else {
                logger.info("list url params is null");
            }
            // Take each status's mid from the list and build a detail record
            // for the weibo-detail (show_batch) API.
            for (Object status : statusesArray) {
                try {
                    JSONObject statusObj = (JSONObject)status;
                    String idStr = statusObj.getString("idstr");
                    Map<String,Object> params = new HashMap<>();
                    params.put(ACCESS_TOKEN_KEY,ACCESS_TOKEN);
                    params.put(IS_GET_LONG_TEXT_KEY,"1");
                    params.put(IDS_KEY,idStr);
                    String itemUrl = String.format(searchMidUrlFormat,asUrlParams(params));
                    String createTime = statusObj.getString("created_at");
                    long releaseTime = parseGMTTime(createTime);
                    String domain = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("domain");
                    String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
                    String keywords = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("keywords");

                    // NOTE(review): split("\"")[1] assumes the keywords tag
                    // value contains double quotes (looks like a serialized
                    // JSON string); a plain value would throw
                    // ArrayIndexOutOfBoundsException here — confirm the tag
                    // format and consider a length guard.
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(itemUrl)
                            .recordKey(StringUtils.joinWith("-",domain,site,keywords.split("\"")[1],idStr))
                            .releaseTime(releaseTime)
                            .copyBizTags()
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                            .needParsed(true)
                            .needWashed(true)
                            .build();
                    // Skip the HTTP fetch: the list already contains the full
                    // status JSON, so pre-fill it as the internal page.
                    itemRecord.setDownload(false);
                    HttpPage page = new HttpPage();
                    page.setRawText(statusObj.toJSONString());
                    itemRecord.setInternalDownloadPage(page);
                    if (checkDownloadTimes()){
                        parsedLinks.add(itemRecord);
                    }
                } catch (ParseException e) {
                    logger.error(e.getMessage(),e);
                }
            }
        }
        return parsedLinks;
    }

    /**
     * After inner downloads finish, copies each inner page's raw text into the
     * parent request's extras under the matching output key (interaction, age,
     * tags) so washPage can merge it into the article document.
     *
     * @param crawlerRecord parent record whose request receives the extras
     * @param internalDownloadRecords completed inner-download records
     * @param links unused here
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        HttpRequest parentRequest = crawlerRecord.getHttpRequest();
        String[] outputKeys = {IS_OUTPUT_Interaction, IS_OUTPUT_Age, IS_OUTPUT_Tags};
        for (CrawlerRequestRecord innerRecord : internalDownloadRecords) {
            String rawText = innerRecord.getInternalDownloadPage().getRawText();
            CategoryTag bizTag = innerRecord.tagsCreator().bizTags().getCategoryTag();
            for (String outputKey : outputKeys) {
                if (bizTag.isContainKVTag(outputKey)) {
                    parentRequest.addExtra(outputKey, rawText);
                }
            }
        }
    }

    /**
     * Ad-hoc developer test: parses a saved search-response JSON from disk and
     * prints the derived next-page URL, per-status detail URLs, and parsed
     * timestamps.
     * NOTE(review): reads a hard-coded absolute Windows path and lives inside
     * a production script class with a JUnit dependency — consider moving it
     * to a dedicated test source set.
     */
    @Test
    public void testParse() throws IOException, ParseException {
        String json = new String(Files.readAllBytes(Paths.get("C:\\Users\\Administrator\\Desktop\\json.txt")));
        JSONObject pageObject = JSONObject.parseObject(json);
        JSONArray statusesArray = pageObject.getJSONArray("statuses");
        System.out.println(statusesArray.size());
        String lastRequestUrl = "https://c.api.weibo.com/2/search/statuses/limited.json?access_token=2.00o4_w1HrAaeYBedf38e38b8SnITmD&q=%E9%98%BF%E9%87%8C%E5%81%A5%E5%BA%B7&dup=0&antispam=0&page=1&count=50";
        if (statusesArray.size() > 0){
            Map<String, Object> urlParams = getUrlParams(lastRequestUrl);
            if (null != urlParams){
                int page = Integer.parseInt((String) urlParams.get(PAGE_KEY));
                page += 1;
                urlParams.put(PAGE_KEY,String.valueOf(page));
                String listUrl = String.format(searchKWUrlFormat,asUrlParams(urlParams));
                System.out.println(listUrl);
            }
            // Take each status's mid from the list and build the detail URL.
            for (Object status : statusesArray) {
                JSONObject statusObj = (JSONObject)status;
                String idStr = statusObj.getString("idstr");
                Map<String,Object> params = new HashMap<>();
                params.put(ACCESS_TOKEN_KEY,ACCESS_TOKEN);
                params.put(IS_GET_LONG_TEXT_KEY,"1");
                params.put(IDS_KEY,idStr);
                String itemUrl = String.format(searchMidUrlFormat,asUrlParams(params));
                String createTime = statusObj.getString("created_at");
                SimpleDateFormat sdf = new SimpleDateFormat("EEE MMM dd HH:mm:ss Z yyyy",Locale.US);
                Date date = sdf.parse(createTime);
                System.out.println(date.getTime());
                sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
                System.out.println(sdf.format(date));

                System.out.println(itemUrl);
                System.out.println(createTime);
            }

        }

    }

    /**
     * Validates a downloaded page.
     *
     * @param crawlerRequestRecord the record that produced the download
     * @param httpPage the downloaded page
     * @return true when the page is UNUSABLE (non-200 status, failed download,
     *         blank body, or body that is not valid JSON); false when the page
     *         is fine. Note the inverted meaning: true == failed check.
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        int statusCode = httpPage.getStatusCode();
        if (statusCode != 200){
            logger.error("download page {} error, status code is {}",requestUrl,statusCode);
            return true;
        }
        if (!httpPage.isDownloadSuccess()){
            logger.error("download page failed, check your link {}",requestUrl);
            return true;
        }
        String rawText = httpPage.getRawText();
        if (StringUtils.isBlank(rawText)){
            logger.error("download page empty, check your link {}",requestUrl);
            return true;
        }
        // The body must be a JSON object for the downstream parsers.
        try {
            JSONObject.parseObject(rawText);
            return false;
        }catch (Exception e){
            logger.error("page rawtext can't parse to JSON, check your link {}",requestUrl);
            return true;
        }
    }

    /**
     * Washes a detail page into output data items: one article document with
     * the optional age/tags payloads (stored in the request extras by
     * afterInternalDownload) merged into its JSON, plus — when the record's
     * result tags include interaction — a separate interaction data item.
     *
     * @param crawlerRequestRecord the detail-page record
     * @param httpPage downloaded (or pre-filled) page
     * @return the produced data items; empty on blank or unparseable pages
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String requestUrl = httpRequest.getUrl();
        String rawText = httpPage.getRawText();
        if (StringUtils.isBlank(rawText)){
            logger.error("httpPage is empty !");
            return crawlerDataList;
        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)){
            JSONObject dataObj = JSONObject.parseObject(rawText);
            if (null == dataObj){
                logger.error("download page has no data ,check url {}",requestUrl);
                return crawlerDataList;
            }
            String mid = dataObj.getString("idstr");
            CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
            // Merge the author-age payload (raw JSON from the inner download)
            // into the article under the "age" key.
            if (categoryTag.isContainKVTag(IS_OUTPUT_Age)){
                String agePageText = (String)extras.get(IS_OUTPUT_Age);
                if (StringUtils.isNotBlank(agePageText)){
                    try {
                        JSONObject agePageObj = JSONObject.parseObject(agePageText);
                        dataObj.put("age",agePageObj);
                    } catch (Exception e) {
                        // Non-JSON age payload: keep the article, blank the field.
                        logger.info("wash age failed ,download times :{}",downloadTimes);
                        dataObj.put("age","");
                    }
                }else {
                    logger.info("wash age but it's empty");
                }
            }
            // Merge the author-tags payload (first element of a JSON array)
            // into the article under the "tags" key.
            if (categoryTag.isContainKVTag(IS_OUTPUT_Tags)){
                String tagsPageText = (String)extras.get(IS_OUTPUT_Tags);
                try {
                    JSONArray jsonArray = JSONObject.parseArray(tagsPageText);
                    if (StringUtils.isNotBlank(tagsPageText) && null != jsonArray && jsonArray.size() > 0){
                        JSONObject tagsPageObj = jsonArray.getJSONObject(0);
                        dataObj.put("tags",tagsPageObj);
                    }else {
                        logger.info("wash tags but it's empty");
                    }
                } catch (Exception e) {
                    logger.error("text is not a json");
                }
            }
            String articleData = dataObj.toJSONString();
            String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
            httpPage.setRequest(httpRequest);
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), mid))
                    .url(requestUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .content(articleData)
                    .resultLabelTag(article)
                    .build();
            crawlerDataList.add(crawlerData);
            // Emit the interaction counts as a second data item keyed by the
            // same mid but the interaction data type.
            if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
                String interactionPageText = (String)extras.get(IS_OUTPUT_Interaction);
                // NOTE(review): parseArray is not wrapped in try/catch here —
                // a malformed interaction payload would make washPage throw;
                // confirm the framework tolerates that.
                JSONArray jsonArray = JSONObject.parseArray(interactionPageText);
                if (StringUtils.isNotBlank(interactionPageText) && null != jsonArray && jsonArray.size() > 0){
                    JSONObject interactionObj = jsonArray.getJSONObject(0);
                    String interactionData = interactionObj.toJSONString();
                    CrawlerData crawlerInteractionData = CrawlerData.builder()
                            .data(crawlerRequestRecord, httpPage)
                            .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), mid))
                            .url(requestUrl)
                            .releaseTime(crawlerRequestRecord.getReleaseTime())
                            .content(interactionData)
                            .resultLabelTag(interaction)
                            .build();
                    crawlerDataList.add(crawlerInteractionData);
                }else {
                    logger.info("wash interaction but json data is empty");
                }
            }
        }
        return crawlerDataList;
    }

    /**
     * Registers every URL pattern this script claims so matching requests are
     * routed to it.
     */
    @Override
    public void initUrlRegulars() {
        String[] patterns = {indexRegex, searchKWUrlRegex, searchAuthorUrlRegex, searchMidUrlRegex};
        for (String pattern : patterns) {
            addUrlRegular(pattern);
        }
    }

    /**
     * Accepts only records whose biz "site" tag matches this script's site id.
     *
     * @param crawlerRequestRecord candidate record
     * @return true when the record's site tag equals {@link #scriptSite}
     *         (case-insensitive); false when the tag is missing
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // FIX: compare with the non-null constant on the left so a record
        // without a "site" tag yields false instead of NullPointerException.
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /**
     * Post-execution hook; intentionally a no-op for this script.
     * NOTE(review): presumably no per-run cleanup is needed — confirm the
     * hourly counter state should persist across executions.
     */
    @Override
    public void afterExecute(CrawlerRecordContext context) {
    }

    /**
     * @return the crawler domain identifier this script belongs to
     */
    @Override
    public String domain() {
        return "weibo";
    }

    /**
     * Serializes a parameter map into a URL query string ("k1=v1&k2=v2").
     * Null/blank values are skipped; surviving values are URL-encoded as UTF-8.
     *
     * @param source parameter map; values are expected to be Strings
     * @return joined query string without a leading '&amp;'; empty string when
     *         no parameter survives the blank filter
     */
    public static String asUrlParams(Map<String, Object> source){
        StringBuilder paramStr = new StringBuilder();
        for (Map.Entry<String, Object> entry : source.entrySet()) {
            String value = (String) entry.getValue();
            // Skip null or whitespace-only values (same semantics as
            // StringUtils.isBlank, without the commons-lang3 dependency).
            if (value == null || value.trim().isEmpty()) {
                continue;
            }
            try {
                value = URLEncoder.encode(value, "utf-8");
            } catch (UnsupportedEncodingException e) {
                // Unreachable: every JVM supports UTF-8.
            }
            if (paramStr.length() > 0) {
                paramStr.append("&");
            }
            paramStr.append(entry.getKey()).append("=").append(value);
        }
        // FIX: the original unconditionally called substring(1), which threw
        // StringIndexOutOfBoundsException for an empty/all-blank map.
        return paramStr.toString();
    }

    /**
     * Parses the query-string portion of a URL into a key/value map.
     *
     * @param url e.g. http://host/path?aa=11&amp;bb=22 (values are NOT
     *        URL-decoded here; callers decode what they need)
     * @return parameter map, or null when the URL carries no query string
     */
    public static Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<>();
        String param = null;
        // FIX: limit the split so a URL ending in a bare "?" no longer throws
        // ArrayIndexOutOfBoundsException.
        String[] urlParts = url.split("\\?", 2);
        if (urlParts.length == 2) {
            param = urlParts[1];
        }
        if (param == null || param.trim().isEmpty()) {
            return null;
        }
        String[] params = param.split("&");
        for (String s : params) {
            // FIX: limit the split so values containing '=' (e.g. tokens or
            // base64 padding) are preserved instead of being dropped.
            String[] p = s.split("=", 2);
            if (p.length == 2 && !p[1].isEmpty()) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

    /**
     * Converts a millisecond timestamp to a second-precision timestamp string.
     *
     * @param date epoch milliseconds
     * @return epoch seconds as a string ("0" for values below one second)
     */
    public static String getSecondTimestamp(long date){
        // FIX: use arithmetic instead of string truncation; the original
        // substring approach returned "-" for small negative inputs (e.g.
        // -500) and was needlessly indirect for the common positive case.
        return String.valueOf(date / 1000L);
    }

    /**
     * Parses Weibo's "created_at" timestamp (e.g. "Mon Mar 15 10:31:32 +0800
     * 2021") into epoch milliseconds.
     *
     * @param createTime US-locale GMT-style date string
     * @return epoch milliseconds
     * @throws ParseException when the string does not match the expected pattern
     */
    private long parseGMTTime(String createTime) throws ParseException {
        // A fresh SimpleDateFormat per call: the class is not thread-safe, so
        // it must not be cached in a shared field without synchronization.
        SimpleDateFormat weiboFormat = new SimpleDateFormat("EEE MMM dd HH:mm:ss Z yyyy", Locale.US);
        return weiboFormat.parse(createTime).getTime();
    }


    /**
     * Resets the hourly download counter and its time window to their initial
     * state. Synchronized on {@code lock} so the three resets appear atomic to
     * checkDownloadTimes, which mutates the same fields under the same lock.
     */
    private void initCheckDownloadRTimes(){
        synchronized (lock){
            downloadTimes.set(0);
            beginTime.set(0L);
            endTime.set(0L);
        }
    }

    /**
     * Per-hour download counter for this script instance. Each call counts as
     * one prospective download; the hour window starts on the first call and
     * rolls forward once it elapses. When the count reaches 50000 within one
     * hour, the state is reset and false is returned so callers stop emitting
     * records.
     *
     * @return true while under the hourly download limit, false once exceeded
     */
    private boolean checkDownloadTimes(){
        synchronized (lock){
            int getDownloadTimes = downloadTimes.get();
            long timeMillis = System.currentTimeMillis();
            long oneHour = 3600L * 1000;
            // First call after a reset: start a new one-hour window.
            if (getDownloadTimes == 0){
                logger.info("采集开始初始化下载次数和时间区间");
                downloadTimes.getAndIncrement();
                beginTime.set(timeMillis);
                endTime.set(timeMillis + oneHour);
                return true;
            }
            if (0 < getDownloadTimes && getDownloadTimes < 50000){
                long bTime = beginTime.get();
                long eTime = endTime.get();
                // Still inside the current hour: keep counting.
                if (bTime <= timeMillis && timeMillis <= eTime){
                    downloadTimes.getAndIncrement();
                }
                // Hour elapsed: roll the window forward and restart the count.
                if (eTime < timeMillis){
                    downloadTimes.set(1);
                    beginTime.set(timeMillis);
                    endTime.set(timeMillis + oneHour);
                }
                return true;
            }
            // Hourly cap reached: reset state and signal callers to stop.
            if (getDownloadTimes >= 50000){
                logger.info("一小时内下载次数达到50000次，终止采集程序");
                downloadTimes.set(0);
                beginTime.set(0L);
                endTime.set(0L);
                return false;
            }
            return true;
        }
    }
}
