package com.chance.cc.crawler.development.scripts.weibo.weiboapi;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import com.google.gson.JsonObject;
import net.lingala.zip4j.core.ZipFile;
import net.lingala.zip4j.exception.ZipException;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.DigestUtils;

import java.io.*;
import java.util.*;

/**
 * Crawler script for the new Weibo commercial APIs: creates historical-search
 * tasks, polls and downloads their results, handles keyword-push subscriptions,
 * and performs the batch user/status lookup endpoints.
 * @Author songding
 * @Date 2021/9/23 17:21
 * @Version 1.0
 **/
public class WeiboApiNewCrawlerScript extends CrawlerCommonScript {
    private static Logger log = LoggerFactory.getLogger(WeiboApiNewCrawlerScript.class);
    public static final String domain = "weibo";
    public static final String site = "newWeibo";
    private static final String DOMAIN_RESULT_JSON_RECORD_TAG = "domain_result_json";//result-json field carried by the initial record
    private static final String check_the_retry = "check_the_retry";

    // NOTE(review): the access tokens hard-coded in the builder methods below are
    // credentials committed to source control — they should come from configuration.
    private static final String weiboUrl = "https://weibo.com/";
    //historical search by author uid / by keyword
    private static final String createUrl = "https://c.api.weibo.com/2/search/statuses/historical/create.json";//create a task
    private static final String checkUrlRegulars = "https://c.api.weibo.com/2/search/statuses/historical/check.json\\S*";//check a task
    private static final String checkUrl  = "https://c.api.weibo.com/2/search/statuses/historical/check.json?task_id=%s&timestamp=%s&signature=%s";
    private static final String downloadUrlRegulars = "https://c.api.weibo.com/2/search/statuses/historical/download.json\\S*";
    // Bug fix: "&timestamp%s" was missing the '=' and produced an invalid query string.
    private static final String downloadUrl = "https://c.api.weibo.com/2/search/statuses/historical/download.json?task_id=%s&timestamp=%s&signature=%s";

    //keyword-push subscription
    // Bug fix: the pattern ended in "//S*" (a literal, not a wildcard), so URLs built
    // from pushUrl never matched it; every other pattern in this class uses "\\S*".
    private static final String pushRegulars = "https://c.api.weibo.com/commercial/push\\S*";
    private static final String pushUrl = "https://c.api.weibo.com/commercial/push?%s";

    //keyword search   https://open.weibo.com/wiki/C/2/search/statuses/limited
    private static final String limitRegulars = "https://c.api.weibo.com/2/search/statuses/limited.json\\S*";
    private static final String limitUrl = "https://c.api.weibo.com/2/search/statuses/limited.json?%s";

    //repost/comment counts by mid
    private static final String countRegulars = "https://api.weibo.com/2/statuses/count.json\\S*";
    private static final String countUrl = "https://api.weibo.com/2/statuses/count.json?%s";

    //full repost list of one status by mid
    private static final String allRegulars = "https://c.api.weibo.com/2/statuses/repost_timeline/all.json\\S*";
    private static final String allUrl = "https://c.api.weibo.com/2/statuses/repost_timeline/all.json?%s";

    //batch interaction counts by mid (weibo id required)   https://open.weibo.com/wiki/C/2/statuses/count/biz
    private static final String countBizRegulars = "https://c.api.weibo.com/2/statuses/count/biz.json\\S*";
    private static final String countBizUrl = "https://c.api.weibo.com/2/statuses/count/biz.json?%s";

    //full comment list of one status by mid
    private static final String showAllRegulars = "https://c.api.weibo.com/2/comments/show/all.json\\S*";
    private static final String showAllUrl = "https://c.api.weibo.com/2/comments/show/all.json?%s";

    //batch status lookup by weibo id
    private static final String batchBizRegulars= "https://c.api.weibo.com/2/statuses/show_batch/biz.json\\S*";
    private static final String batchBizUrl= "https://c.api.weibo.com/2/statuses/show_batch/biz.json?%s";

    //batch user info by uid   https://c.api.weibo.com/2/users/show_batch/other.json
    private static final String otherRegulars = "https://c.api.weibo.com/2/users/show_batch/other.json\\S*";
    private static final String otherUrl = "https://c.api.weibo.com/2/users/show_batch/other.json?%s";
    private static final String otherUrlNoParams = "https://c.api.weibo.com/2/users/show_batch/other.json";

    //user tags by uid   https://c.api.weibo.com/2/tags/tags_batch/other.json
    private static final String otherTagRegulars = "https://c.api.weibo.com/2/tags/tags_batch/other.json\\S*";
    private static final String otherTagUrl = "https://c.api.weibo.com/2/tags/tags_batch/other.json?%s";

    //user age by uid   https://open.weibo.com/wiki/C/2/users/birthday/other
    private static final String otherAgeRegulars = "https://c.api.weibo.com/2/users/birthday/other.json\\S*";
    private static final String otherAgaUrl = "https://c.api.weibo.com/2/users/birthday/other.json?%s";
    private static final String otherAgaUrlNoParams = "https://c.api.weibo.com/2/users/birthday/other.json";
    @Override
    public void initUrlRegulars() {
        // Registers every URL pattern this script claims so the framework routes
        // matching records to it.
        // NOTE(review): limitRegulars and otherTagRegulars are handled in
        // parseLinks/washPage but are NOT registered here — confirm whether their
        // records are routed to this script by some other mechanism.
        addUrlRegular(countRegulars);
        addUrlRegular(pushRegulars);
        addUrlRegular(batchBizRegulars);
        addUrlRegular(weiboUrl);
        addUrlRegular(createUrl);
        addUrlRegular(checkUrlRegulars);
        addUrlRegular(downloadUrlRegulars);
        addUrlRegular(countBizRegulars);
        addUrlRegular(otherRegulars);
        addUrlRegular(otherAgeRegulars);
        addUrlRegular(allRegulars);
        addUrlRegular(showAllRegulars);
    }

    /**
     * Builds the outgoing requests for a record before it is downloaded.
     * <p>
     * Two sources feed this: (1) support-source records whose URL contains "keys"
     * supply keyword/uid parameters that are turned into the matching API request;
     * (2) a DOMAIN_RESULT_JSON_RECORD_TAG biz tag supplies a check-task or
     * count-lookup URL to follow.
     *
     * @param crawlerRequestRecord the record being prepared
     * @param supportSourceRecords auxiliary records carrying keyword parameters; may be null
     * @return the follow-up requests to schedule
     */
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> prepareLinks = new ArrayList<>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        if (supportSourceRecords != null){
            for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords){
                String url = supportSourceRecord.getHttpRequest().getUrl();
                if (url.contains("keys")){
                    HttpPage page = supportSourceRecord.getInternalDownloadPage();
                    String msg = page.getJson().jsonPath($_type + ".msg").get();
                    if (msg.equals("success")){
                        List<String> all = page.getJson().jsonPath($_type + ".content").all();
                        StringBuffer paramss = new StringBuffer();
                        CrawlerRequestRecord record = null;
                        // NOTE(review): 'record' is overwritten on every iteration and only
                        // added to prepareLinks once after the loop, so only the LAST content
                        // entry produces a request — confirm one record per entry wasn't intended.
                        for (String data :all){
                            JSONObject jsonObject = JSONObject.parseObject(data);
                            JSONObject keyword = jsonObject.getJSONObject("keyword");
                            Set<String> strings = keyword.keySet();
                            Iterator<String> iterator = strings.iterator();
                            String params = null;
                            Map<String,String> condition = new HashMap<>();
                            // Flatten the keyword object: every pair goes into 'condition',
                            // but 'params' keeps only the last "k=v" pair seen.
                            while(iterator.hasNext()){
                                String key = iterator.next();
                                String value = keyword.getString(key);
                                params = key + "=" + value;
                                condition.put(key,value);
                            }
                            // NOTE(review): matches() treats createUrl as a regex (its '.'
                            // chars match any character) — works for this literal URL but is
                            // fragile; confirm equals() was not intended.
                            if (requestUrl.matches(createUrl)){
                                crawlerRequestRecord.tagsCreator().bizTags().addCustomKV("condition",condition);
                                record =  this.getCreateUrl(crawlerRequestRecord);
                            }
                            paramss.append("&").append(params);
                            // Route the accumulated parameters to the endpoint matching the URL.
                            if (requestUrl.matches(countBizRegulars)){
                                record = this.getCountBiz(crawlerRequestRecord,paramss);
                            }else if (requestUrl.matches(otherRegulars)){
                                record = this.getOther(crawlerRequestRecord,paramss);
                            }else if (requestUrl.matches(otherTagRegulars)){
                                record = this.getOtherTag(crawlerRequestRecord,paramss);
                            }else if (requestUrl.matches(otherAgeRegulars)){
                                record = this.getOtherAge(crawlerRequestRecord,paramss);
                            }else if (requestUrl.matches(limitRegulars)){
                                record = this.getLimit(crawlerRequestRecord,paramss);
                            }else if (requestUrl.matches(allRegulars)){
                                record = this.getAllWeibo(crawlerRequestRecord,paramss);
                            }else if (requestUrl.matches(batchBizRegulars)){
                                record = this.getBitchBiz(crawlerRequestRecord,paramss);
                            }else if (requestUrl.matches(pushRegulars)){
                                record = this.getPush(crawlerRequestRecord,paramss);
                            }

                        }

                        prepareLinks.add(record);
                    }else{
                        log.info("Failed to obtain the keyword. Procedure");
                    }
                }
            }
        }

        if (crawlerRequestRecord.tagsCreator().bizTags().hasKVTag(DOMAIN_RESULT_JSON_RECORD_TAG)) {
            // The initial record carries a JSON payload describing the next URL to hit.
            KVTag domainResultJson = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(DOMAIN_RESULT_JSON_RECORD_TAG);
            CrawlerDomainUrls crawlerDomainUrls = JSON.parseObject(String.valueOf(domainResultJson.getVal()), CrawlerDomainUrls.class);
            String url = crawlerDomainUrls.getUrl();
            Json urlJson = new Json(url);
            String itemUrl = urlJson.jsonPath($_type + ".url").get();
            if (itemUrl.matches(checkUrlRegulars)){
                // Historical-task check: carry the task credentials forward as biz tags
                // so getDownloadUrl can sign the download request later.
                long releaseTimeToLong = Long.parseLong(urlJson.jsonPath($_type + ".time").get());
                String task_id = urlJson.jsonPath($_type + ".task_id").get();
                String id = urlJson.jsonPath($_type + ".id").get();
                String secret_key = urlJson.jsonPath($_type + ".secret_key").get();
                String ids = urlJson.jsonPath($_type+".ids").get();
                crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().removeLabelTag(DOMAIN_RESULT_JSON_RECORD_TAG);
                CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                record.tagsCreator().bizTags().addCustomKV("ids",ids);
                record.tagsCreator().bizTags().addCustomKV("task_id",task_id);
                record.tagsCreator().bizTags().addCustomKV("id",id);
                record.tagsCreator().bizTags().addCustomKV("secret_key",secret_key);
                prepareLinks.add(record);
            }
            if (itemUrl.matches(countRegulars)){
                // Count lookup: build the count URL from the supplied mid.
                String id = urlJson.jsonPath($_type + ".mid").get();
                crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().removeLabelTag(DOMAIN_RESULT_JSON_RECORD_TAG);
                String countUrl1 = String.format(countUrl,"ids="+id);
                CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(countUrl1)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                prepareLinks.add(record);
            }
        }

        return prepareLinks;
    }




    /**
     * Dispatches a downloaded page to the link-extraction step matching its URL.
     * Failed downloads (non-200 status or transport failure) are re-queued and
     * excluded from washing.
     *
     * @param crawlerRecord the record whose page was downloaded
     * @param page          the downloaded page
     * @return follow-up requests produced by the matching extraction step
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<>();
        // Re-queue the download on HTTP or transport failure ("!= true" replaced
        // with the idiomatic negation; logging is parameterized).
        if (page.getStatusCode() != 200 || !page.isDownloadSuccess()) {
            log.error("download error or page != 200  code={}", page.getStatusCode());
            this.recordAgainDownload(crawlerRecord, page, parseLinks);
            crawlerRecord.setNeedWashPage(false);
            return parseLinks;
        }
        String url = crawlerRecord.getHttpRequest().getUrl();

        if (url.matches(checkUrlRegulars)) {
            this.getDownloadUrl(crawlerRecord, page, parseLinks);
        }
        if (url.matches(downloadUrlRegulars)) {
            this.getDownloadData(crawlerRecord, page, parseLinks);
        }
        if (url.matches(limitRegulars)) {
            this.getOtherRecord(crawlerRecord, page, parseLinks);
        }
        // A push notification fans out into age/tag/interaction/comment follow-ups.
        if (url.matches(pushRegulars)) {
            this.getPushAll(crawlerRecord, page, parseLinks);
        }
        return parseLinks;
    }

    /**
     * Fans a keyword-push result out into follow-up requests: the next push poll,
     * the author's tags and age, and the post's interaction counts and comments.
     * <p>
     * Bug fixes vs. the original: each request now gets its own parameter buffer
     * (previously everything was appended to {@code uidsParams}, so later URLs
     * carried all earlier parameters), and the age request now goes through
     * {@link #getOtherAge} instead of {@link #getOtherTag}.
     */
    private void getPushAll(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        // since_id guards against fetching duplicate push data on the next poll.
        String sinceId = page.getJson().jsonPath($_type + ".id").get();
        // Subscription id of this push channel.
        String subid = page.getJson().jsonPath($_type + ".match_info.subid").get();
        StringBuffer pushParams = new StringBuffer();
        pushParams.append("&since_id=" + sinceId + "&").append("subid=" + subid);
        parseLinks.add(this.getPush(crawlerRecord, pushParams));

        // Author uid drives both the tag and the age lookups.
        String uid = page.getJson().jsonPath($_type + ".text.status.user.id").get();
        // User tags (batch endpoint expects "uids").
        StringBuffer uidsParams = new StringBuffer();
        uidsParams.append("&uids=" + uid);
        parseLinks.add(getOtherTag(crawlerRecord, uidsParams));
        // User age (endpoint expects "uid").
        StringBuffer uidParams = new StringBuffer();
        uidParams.append("&uid=" + uid);
        parseLinks.add(getOtherAge(crawlerRecord, uidParams));

        // mid of the pushed status drives the interaction and comment lookups.
        String mid = page.getJson().jsonPath($_type + ".text.status.mid").get();
        // Interaction counts (endpoint expects "ids").
        StringBuffer idsParams = new StringBuffer();
        idsParams.append("&ids=" + mid);
        parseLinks.add(getCountBiz(crawlerRecord, idsParams));
        // Comment list (endpoint expects "id").
        StringBuffer idParams = new StringBuffer();
        idParams.append("&id=" + mid);
        parseLinks.add(getShowAll(crawlerRecord, idParams));
    }


    /*
     * Writes the downloaded archive to disk, unzips it (password = task_id +
     * secret_key) and loads the extracted log file into the page as JSON.
     * */
    private void getDownloadData(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String path = "/data/chance_crawler_test/data/weibo/weiboBiz/";
        String task_id = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("task_id");
        String secret_key = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("secret_key");
        String zipPath = path;
        // Persist the raw archive bytes; try-with-resources closes the stream
        // (the original leaked the FileOutputStream).
        try (FileOutputStream out = new FileOutputStream(zipPath + ".zip")) {
            IOUtils.write(page.getBytes(), out);
        } catch (IOException e) {
            log.error("Failed to write the compressed package. Procedure", e);
        }
        // Extract with the task-specific password.
        try {
            ZipFile zipFile = new ZipFile(zipPath + ".zip");
            zipFile.setFileNameCharset("GBK");
            zipFile.setPassword(task_id + secret_key);
            zipFile.extractAll(zipPath);
        } catch (ZipException e) {
            log.error("Unpack the failure", e);
        }
        // Read the extracted log. The original joined the path with "\\", which
        // does not match the forward-slash path used above; the reader is now
        // closed via try-with-resources as well.
        try (InputStreamReader reader = new InputStreamReader(
                new FileInputStream(new File(zipPath + "/" + task_id + ".log")))) {
            List<String> strings = IOUtils.readLines(reader);
            page.setJson(new Json(JSONObject.toJSONString(strings)));
        } catch (IOException e) {
            log.error("Read task failed", e);
        }

    }
    /*
     * Checks whether a historical-search task has finished; when it has, builds
     * the signed download request. While the task is still pending, the record is
     * re-queued for another attempt after a five-minute wait.
     * */
    private void getDownloadUrl(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String status = page.getJson().jsonPath($_type + ".status").get();
        if (status.equals("false")){
            log.info("The task is not created,Wait 5 minutes and try again");
            try {
                Thread.sleep(60000*5);// wait five minutes before re-queuing the check
            } catch (InterruptedException e) {
                // Restore the interrupt flag instead of swallowing it. The original
                // called printStackTrace() and fell through to build a download URL
                // for a task that had not finished.
                Thread.currentThread().interrupt();
            }
            crawlerRecord.tagsCreator().bizTags().addCustomKV(check_the_retry,check_the_retry);
            this.recordAgainDownload(crawlerRecord,page,parseLinks);
            crawlerRecord.setNeedWashPage(false);
            return;
        }
        // Sign the download request with md5(id + secret_key + timestamp).
        String task_id = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("task_id");
        String time = String.valueOf(System.currentTimeMillis());
        String id = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("id");
        String secret_key = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("secret_key");
        String url = String.format(downloadUrl,task_id,time, DigestUtils.md5DigestAsHex((id+secret_key+time).getBytes()));
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parseLinks.add(record);
    }
    /*
     * For each status in a keyword-search result, emits the extra requests
     * (interaction counts / user info / user tags / user age) selected by the
     * record's biz tags.
     * */
    private void getOtherRecord(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        List<String> all = page.getJson().jsonPath($_type + ".statuses").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String mid = jsonObject.getString("mid");
            JSONObject user = jsonObject.getJSONObject("user");
            String id = user.getString("id");
            String screen_name = user.getString("screen_name");
            // Each endpoint gets its own parameter buffer. The original shared one
            // buffer across the branches, so later requests carried the earlier
            // branches' parameters as well.
            if (crawlerRecord.tagsCreator().bizTags().hasKVTag("midInteraction")) {
                StringBuffer params = new StringBuffer();
                params.append("&").append("ids=" + mid);
                parseLinks.add(this.getCountBiz(crawlerRecord, params));
            }
            if (crawlerRecord.tagsCreator().bizTags().hasKVTag("userMessage")) {
                StringBuffer params = new StringBuffer();
                params.append("&").append("id=" + id + "&").append("screen_name=" + screen_name);
                parseLinks.add(this.getOther(crawlerRecord, params));
            }
            if (crawlerRecord.tagsCreator().bizTags().hasKVTag("userTag")) {
                StringBuffer params = new StringBuffer();
                params.append("&").append("uids=" + id);
                parseLinks.add(this.getOtherTag(crawlerRecord, params));
            }
            if (crawlerRecord.tagsCreator().bizTags().hasKVTag("userAge")) {
                StringBuffer params = new StringBuffer();
                // Bug fix: the original appended "uid" + id with no '=',
                // producing an unparseable parameter like "uid12345".
                params.append("&").append("uid=" + id);
                parseLinks.add(this.getOtherAge(crawlerRecord, params));
            }
        }
    }
    /*
     * Builds the request that fetches a batch of statuses by their weibo ids.
     * */
    private CrawlerRequestRecord getBitchBiz(CrawlerRequestRecord crawlerRecord, StringBuffer params) {
        // Callers prepend a '&'; strip it before formatting the query string.
        String query = String.valueOf(params).substring(1);
        String requestUrl = String.format(batchBizUrl, query) + "&access_token=2.00o4_w1HrAaeYBedf38e38b8SnITmD";
        return CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(requestUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
    }
    /*
     * Builds the next keyword-push poll request.
     * */
    private CrawlerRequestRecord getPush(CrawlerRequestRecord crawlerRecord, StringBuffer params) {
        // Callers prepend a '&'; strip it before formatting the query string.
        String query = String.valueOf(params).substring(1);
        String requestUrl = String.format(pushUrl, query);
        return CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(requestUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
    }
    /*
     * Builds the request that fetches the full comment list of one status by mid.
     * */
    private CrawlerRequestRecord getShowAll(CrawlerRequestRecord crawlerRecord, StringBuffer uidsParams) {
        // Callers prepend a '&'; strip it before formatting the query string.
        String query = String.valueOf(uidsParams).substring(1);
        String requestUrl = String.format(showAllUrl, query) + "&access_token=2.00o4_w1HrAaeYBedf38e38b8SnITmD";
        return CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(requestUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
    }
    /*
     * Builds the request that fetches the full repost timeline of one status by mid.
     * */
    private CrawlerRequestRecord getAllWeibo(CrawlerRequestRecord crawlerRecord, StringBuffer params) {
        // Callers prepend a '&'; strip it before formatting the query string.
        String query = String.valueOf(params).substring(1);
        String requestUrl = String.format(allUrl, query) + "&access_token=2.00o4_w1HrAaeYBedf38e38b8SnITmD";
        return CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(requestUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
    }

    /*
     * Builds the keyword-search request (limited endpoint).
     * */
    private CrawlerRequestRecord getLimit(CrawlerRequestRecord crawlerRecord, StringBuffer params) {
        // Callers prepend a '&'; strip it before formatting the query string.
        String query = String.valueOf(params).substring(1);
        String requestUrl = String.format(limitUrl, query) + "&access_token=2.00o4_w1HrAaeYBedf38e38b8SnITmD";
        return CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(requestUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
    }

    /*
     * Builds the request that fetches user tags by uid (batch endpoint).
     * */
    private CrawlerRequestRecord getOtherTag(CrawlerRequestRecord crawlerRecord, StringBuffer params) {
        // Callers prepend a '&'; strip it before formatting the query string.
        String query = String.valueOf(params).substring(1);
        String requestUrl = String.format(otherTagUrl, query) + "&access_token=2.00o4_w1HrAaeYBedf38e38b8SnITmD";
        return CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(requestUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
    }
    /*
     * Builds the request that fetches a user's birthday/age by uid.
     * */
    private CrawlerRequestRecord getOtherAge(CrawlerRequestRecord crawlerRecord, StringBuffer params) {
        String url;
        if (params != null){
            // Callers prepend a '&'; strip it before formatting the query string.
            String paramss = String.valueOf(params).substring(1);
            url = String.format(otherAgaUrl,paramss) + "&access_token=2.00o4_w1HrAaeYBedf38e38b8SnITmD";
        }else{
            // Bug fix: with no existing query string the token must be introduced
            // by '?'; the original appended '&' and produced a malformed URL.
            url = otherAgaUrlNoParams + "?access_token=2.00o4_w1HrAaeYBedf38e38b8SnITmD";
        }
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        return record;
    }
    /*
     * Builds the request that fetches user profile information by uid (batch endpoint).
     * */
    private CrawlerRequestRecord getOther(CrawlerRequestRecord crawlerRecord, StringBuffer params) {
        String url;
        if (params != null){
            // Callers prepend a '&'; strip it before formatting the query string.
            String paramss = String.valueOf(params).substring(1);
            url = String.format(otherUrl,paramss) + "&access_token=2.00o4_w1HrAaeYBedf38e38b8SnITmD";
        }else{
            // Bug fix: with no existing query string the token must be introduced
            // by '?'; the original appended '&' and produced a malformed URL.
            url = otherUrlNoParams + "?access_token=2.00o4_w1HrAaeYBedf38e38b8SnITmD";
        }
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        return record;
    }

    /*
     * Builds the request that fetches interaction counts by mid (weibo id required).
     * */
    private CrawlerRequestRecord getCountBiz(CrawlerRequestRecord crawlerRecord, StringBuffer params) {
        // Callers prepend a '&'; strip it before formatting the query string.
        String query = String.valueOf(params).substring(1);
        String requestUrl = String.format(countBizUrl, query) + "&access_token=c25e5caf32f3b72319867990e59c2b1f";
        return CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(requestUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
    }




    /*
     * Builds the POST request that creates a historical-search task from the
     * "condition" biz tag (starttime / endtime / keyword q / ids).
     * */
    private CrawlerRequestRecord getCreateUrl(CrawlerRequestRecord crawlerRecord) {
        KVTag condition = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("condition");
        // The tag value is the keyword map stored in prepareRequest.
        Map<String,Object> map = (Map<String, Object>) condition.getVal();
        Long starttime = (Long) map.get("starttime");
        Long endtime = (Long) map.get("endtime");
        String q = (String) map.get("q");
        String ids = (String) map.get("ids");
        CrawlerRequestRecord record =CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(createUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        // Unlike the other GET endpoints, the create endpoint takes a JSON POST body.
        record.getHttpRequest().setMethod("post");
        JsonObject jsonObject = new JsonObject();
        jsonObject.addProperty("starttime",starttime);
        jsonObject.addProperty("endtime",endtime);
        jsonObject.addProperty("q",q);
        jsonObject.addProperty("ids",ids);
        HttpRequestBody body = HttpRequestBody.json(jsonObject.toString(),"UTF-8");
        record.getHttpRequest().setRequestBody(body);
        return record;
    }


    /**
     * Routes a downloaded page to the wash routine whose URL pattern it matches,
     * collecting the produced CrawlerData results.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> dataList = new ArrayList<>();
        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        if (requestUrl.matches(createUrl)) {
            // Task creation: capture the created task's information.
            this.washCreateUrl(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(downloadUrlRegulars)) {
            // Historical-task archive: wash the downloaded task data.
            this.washDownload(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(limitRegulars)) {
            // Keyword search results.
            this.washLimit(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(batchBizRegulars)) {
            // Batch status lookup by weibo id.
            this.washBatchBiz(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(allRegulars)) {
            // Repost timeline of one status.
            this.washAllWeibo(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(countBizRegulars)) {
            // Interaction counts by mid.
            this.washCountBiz(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(otherRegulars)) {
            // User profile information by uid.
            this.washOther(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(otherTagRegulars)) {
            // User tags by uid.
            this.washOtherTag(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(otherAgeRegulars)) {
            // User age by uid.
            this.washOtherAge(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(pushRegulars)) {
            // Keyword-push subscription payload.
            this.washPushAll(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(showAllRegulars)) {
            // Comment list of one status.
            this.washShowAll(crawlerRecord, page, dataList);
        }
        if (requestUrl.matches(countRegulars)) {
            // Repost/comment counts of a status.
            this.washCount(crawlerRecord, page, dataList);
        }
        return dataList;
    }

    /**
     * Washes a count response: the page JSON is a list of per-status count
     * objects, each emitted as one article CrawlerData routed to kafka.
     */
    private void washCount(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        // Parameterized with '?' to avoid the raw List type (elements are JSON objects).
        List<?> list = page.getJson().toObject(List.class);
        for (Object obj : list) {
            JSONObject jsonObject = JSONObject.parseObject(String.valueOf(obj));
            // NOTE(review): the dataId component is read from "uid" — confirm the
            // status id was not intended here.
            String id = jsonObject.getString("uid");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, id))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(System.currentTimeMillis())
                    .content(String.valueOf(obj))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            dataList.add(crawlerData);
        }
    }

    /**
     * Washes a comment-list response: every entry of ".comments" becomes one
     * article CrawlerData routed to the kafka pipeline.
     */
    private void washShowAll(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        List<String> comments = page.getJson().jsonPath($_type + ".comments").all();
        for (String comment : comments) {
            // No stable id is available here, so the dataId uses a random suffix.
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, new Random().nextInt(1000000000)))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(System.currentTimeMillis())
                    .content(comment)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .flowInPipelineTag("kafka")
                    .build();
            dataList.add(crawlerData);
        }
    }

    /**
     * Washes a keyword-push response into two results: a redis record keyed by
     * the status mid (carrying the follow-up count URL) and a kafka record with
     * the pushed text payload.
     * <p>
     * Bug fix: the original marked and re-added {@code idData} at the end, so
     * {@code textData} was built but never emitted.
     */
    private void washPushAll(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        String mid = page.getJson().jsonPath($_type + ".text.status.mid").get();// mid drives the follow-up count lookup
        String since_id = page.getJson().jsonPath($_type + ".id").get();
        CrawlerData idData = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, mid))
                .url(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(System.currentTimeMillis())
                .addContentKV("mid", mid)
                .addContentKV("url", "https://api.weibo.com/2/statuses/count.json")
                .flowInPipelineTag("redis")
                .build();
        idData.setFilterPipelineResult(true);
        dataList.add(idData);

        String text = page.getJson().jsonPath($_type + ".text").get();// the pushed status payload

        CrawlerData textData = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, since_id))
                .url(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(System.currentTimeMillis())
                .content(text)
                .flowInPipelineTag("kafka")
                .build();
        textData.setFilterPipelineResult(true);
        dataList.add(textData);
    }

    /**
     * Extracts every repost under {@code <type>.reposts} and emits one
     * article-labelled {@link CrawlerData} per repost, each annotated with the
     * page's paging cursors and total count, routed to kafka.
     *
     * @param crawlerRecord the request record this page was fetched for
     * @param page          the downloaded HTTP page (JSON body)
     * @param dataList      output list the new CrawlerData items are appended to
     */
    private void washAllWeibo(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        List<String> all = page.getJson().jsonPath($_type + ".reposts").all();
        String previous_cursor = page.getJson().jsonPath($_type + ".previous_cursor").get();
        String next_cursor = page.getJson().jsonPath($_type + ".next_cursor").get();
        String total_number = page.getJson().jsonPath($_type + ".total_number").get();
        // Reuse a single Random instead of constructing a new instance per iteration.
        Random random = new Random();
        for (String data : all) {
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    // Reposts carry no stable id here, so a random suffix keeps dataIds unique.
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, random.nextInt(1000000000)))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(System.currentTimeMillis())
                    .content(data)
                    .addContentKV("previous_cursor", previous_cursor)
                    .addContentKV("next_cursor", next_cursor)
                    .addContentKV("total_number", total_number)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .flowInPipelineTag("kafka")
                    .build();
            dataList.add(crawlerData);
        }
    }

    /**
     * Parses the page body as a JSON array of user objects and emits one
     * article-labelled {@link CrawlerData} per element, keyed by its {@code uid}
     * field and routed to kafka.
     *
     * @param crawlerRecord the request record this page was fetched for
     * @param page          the downloaded HTTP page (JSON array body)
     * @param dataList      output list the new CrawlerData items are appended to
     */
    private void washDownload(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        // Wildcard-parameterized list instead of the raw List type.
        List<?> list = page.getJson().toObject(List.class);
        for (Object obj : list) {
            String raw = String.valueOf(obj);
            JSONObject jsonObject = JSONObject.parseObject(raw);
            String id = jsonObject.getString("uid");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, id))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(System.currentTimeMillis())
                    .content(raw)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            dataList.add(crawlerData);
        }
    }
    //根据微博ID批量获取微博信息。
    private void washBatchBiz(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        List<String> all = page.getJson().jsonPath($_type + ".statuses").all();
        for (String data :all){
            JSONObject jsonObject = JSONObject.parseObject(data);
            String id = jsonObject.getString("id");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord,page)
                    .dataId(StringUtils.joinWith("-",crawlerRecord.getDomain(),CrawlerEnum.CrawlerDataType.article,id))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(System.currentTimeMillis())
                    .content(data)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            dataList.add(crawlerData);
        }
    }

    /**
     * Walks the {@code <type>.statuses} array and emits one article-labelled
     * {@link CrawlerData} per status, keyed by its {@code mid} field and routed
     * to kafka.
     *
     * @param crawlerRecord the request record this page was fetched for
     * @param page          the downloaded HTTP page (JSON body)
     * @param dataList      output list the new CrawlerData items are appended to
     */
    private void washLimit(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        List<String> statuses = page.getJson().jsonPath($_type + ".statuses").all();
        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        for (String status : statuses) {
            String statusMid = JSONObject.parseObject(status).getString("mid");
            CrawlerData item = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, statusMid))
                    .url(requestUrl)
                    .releaseTime(System.currentTimeMillis())
                    .content(status)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .flowInPipelineTag("kafka")
                    .build();
            item.setFilterPipelineResult(true);
            dataList.add(item);
        }
    }


    /**
     * Emits a single article-labelled {@link CrawlerData} carrying a user's
     * {@code birthday} and {@code birthday_visible} fields, routed to kafka.
     * The dataId gets a random suffix because no stable id is available here.
     *
     * @param crawlerRecord the request record this page was fetched for
     * @param page          the downloaded HTTP page (JSON body)
     * @param dataList      output list the new CrawlerData item is appended to
     */
    private void washOtherAge(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        Json json = page.getJson();
        String birthdayValue = json.jsonPath($_type + ".birthday").get();
        String birthdayVisibility = json.jsonPath($_type + ".birthday_visible").get();
        CrawlerData item = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, new Random().nextInt(1000000000)))
                .url(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(System.currentTimeMillis())
                .addContentKV("birthday", birthdayValue)
                .addContentKV("birthday_visible", birthdayVisibility)
                .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                .flowInPipelineTag("kafka")
                .build();
        item.setFilterPipelineResult(true);
        dataList.add(item);
    }

    /**
     * Parses the page body as a JSON array of tag objects and emits one
     * article-labelled {@link CrawlerData} per element, keyed by its {@code id}
     * field and routed to kafka.
     *
     * @param crawlerRecord the request record this page was fetched for
     * @param page          the downloaded HTTP page (JSON array body)
     * @param dataList      output list the new CrawlerData items are appended to
     */
    private void washOtherTag(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        List elements = page.getJson().toObject(List.class);
        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        for (Object element : elements) {
            String raw = String.valueOf(element);
            String tagId = JSONObject.parseObject(raw).getString("id");
            CrawlerData item = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, tagId))
                    .url(requestUrl)
                    .releaseTime(System.currentTimeMillis())
                    .content(raw)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .flowInPipelineTag("kafka")
                    .build();
            item.setFilterPipelineResult(true);
            dataList.add(item);
        }
    }


    /**
     * Walks the {@code <type>.users} array and emits one article-labelled
     * {@link CrawlerData} per user, keyed by its {@code id} field and routed to
     * kafka. Logs an error when the array is empty.
     *
     * @param crawlerRecord the request record this page was fetched for
     * @param page          the downloaded HTTP page (JSON body)
     * @param dataList      output list the new CrawlerData items are appended to
     */
    private void washOther(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        List<String> users = page.getJson().jsonPath($_type + ".users").all();
        if (users.isEmpty()) {
            log.error("interaction is null url = [{}]", requestUrl);
        }
        for (String user : users) {
            String userId = JSONObject.parseObject(user).getString("id");
            CrawlerData item = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, userId))
                    .url(requestUrl)
                    .releaseTime(System.currentTimeMillis())
                    .content(user)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .flowInPipelineTag("kafka")
                    .build();
            item.setFilterPipelineResult(true);
            dataList.add(item);
        }
    }

    /**
     * Parses the page body as a JSON array of per-status count objects and emits
     * one interaction-labelled {@link CrawlerData} per element carrying its
     * comment, repost, and like counts, routed to kafka. Logs an error when the
     * array is empty.
     *
     * @param crawlerRecord the request record this page was fetched for
     * @param page          the downloaded HTTP page (JSON array body)
     * @param dataList      output list the new CrawlerData items are appended to
     */
    private void washCountBiz(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        String url = crawlerRecord.getHttpRequest().getUrl();
        // Wildcard-parameterized list instead of the raw List type.
        List<?> list = page.getJson().toObject(List.class);
        if (list.isEmpty()) {
            log.error("interaction is null url = [{}]", url);
        }
        for (Object o : list) {
            JSONObject jsonObject = JSONObject.parseObject(String.valueOf(o));
            String id = jsonObject.getString("id");
            String comments = jsonObject.getString("comments");// comment count
            String reposts = jsonObject.getString("reposts");// repost count
            String likes = jsonObject.getString("likes");// like count
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction, id))
                    .url(url)
                    .releaseTime(System.currentTimeMillis())
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                    .addContentKV("id", id)
                    .addContentKV("comments", comments)
                    .addContentKV("reposts", reposts)
                    .addContentKV("likes", likes)
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            dataList.add(crawlerData);
        }
    }

    /**
     * Handles the response of the historical-task creation endpoint: reads the
     * task parameters echoed back by the API, derives the signed check URL
     * (MD5 of creator id + secret key + timestamp), and emits one redis-bound
     * {@link CrawlerData} carrying everything needed to poll the task later.
     *
     * @param crawlerRecord the request record this page was fetched for
     * @param page          the downloaded HTTP page (JSON body)
     * @param dataList      output list the new CrawlerData item is appended to
     */
    private void washCreateUrl(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        Json json = page.getJson();
        String creatorId = json.jsonPath($_type + ".id").get();           // task creator ID
        String keyword = json.jsonPath($_type + ".q").get();              // search keyword
        String userIds = json.jsonPath($_type + ".ids").get();            // batch user ids
        String provinceCode = json.jsonPath($_type + ".province").get();  // province filter
        String cityCode = json.jsonPath($_type + ".city").get();          // city filter
        String rangeStart = json.jsonPath($_type + ".starttime").get();   // time range start
        String rangeEnd = json.jsonPath($_type + ".endtime").get();       // time range end
        String weiboType = json.jsonPath($_type + ".type").get();         // 0: original, 1: repost, empty: all
        String verifiedOnly = json.jsonPath($_type + ".hasv").get();      // 0/1: verified-user posts, empty: all
        String samplePercent = json.jsonPath($_type + ".onlynum").get();  // sampling percentage
        String taskId = json.jsonPath($_type + ".task_id").get();         // created task ID
        String secretKey = json.jsonPath($_type + ".secret_key").get();   // key for status check / download
        String timestamp = String.valueOf(System.currentTimeMillis());
        // Signature is md5(creatorId + secretKey + timestamp), per the check endpoint contract.
        String signature = DigestUtils.md5DigestAsHex((creatorId + secretKey + timestamp).getBytes());
        String checkRecordUrl = String.format(checkUrl, taskId, timestamp, signature);
        CrawlerData taskData = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, taskId))
                .url(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(System.currentTimeMillis())
                .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                .addContentKV("url", checkRecordUrl)
                .addContentKV("time", timestamp)
                .addContentKV("id", creatorId)
                .addContentKV("q", keyword)
                .addContentKV("ids", userIds)
                .addContentKV("province", provinceCode)
                .addContentKV("city", cityCode)
                .addContentKV("starttime", rangeStart)
                .addContentKV("endtime", rangeEnd)
                .addContentKV("type", weiboType)
                .addContentKV("hasv", verifiedOnly)
                .addContentKV("onlynum", samplePercent)
                .addContentKV("task_id", taskId)
                .addContentKV("secret_key", secretKey)
                .flowInPipelineTag("redis")
                .build();
        taskData.setFilterPipelineResult(true);
        dataList.add(taskData);
    }

    /**
     * Re-enqueues the current request for another download attempt. When the
     * record is not marked with the {@code check_the_retry} tag, a per-URL retry
     * counter is maintained in the biz tags and the retry is abandoned after 10
     * attempts.
     *
     * @param crawlerRecord the record whose URL should be fetched again
     * @param page          the downloaded HTTP page (unused here)
     * @param parseLinks    output list the retry record is appended to
     */
    private void recordAgainDownload(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        boolean trackRetries = !crawlerRecord.tagsCreator().bizTags().hasKVTag(check_the_retry);
        int attempt = 0;
        if (trackRetries) {
            String previous = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("count");
            if (previous == null) {
                attempt = 1;
            } else {
                attempt = Integer.parseInt(previous);
                // Give up after 10 download attempts for the same URL.
                if (attempt >= 10) {
                    log.error("The number of downloads exceeds the limit");
                    return;
                }
                attempt = attempt + 1;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        CrawlerRequestRecord retry = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(requestUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        if (trackRetries) {
            // Embed the attempt number in the record key so each retry dedupes separately.
            retry.setRecordKey(requestUrl + attempt);
            retry.tagsCreator().bizTags().addCustomKV("count", attempt);
        }
        parseLinks.add(retry);
    }
    /**
     * Routing check: this script handles only records whose biz "site" tag
     * matches {@link #site} ({@code newWeibo}).
     *
     * @param crawlerRecord the record to test
     * @return true when the record belongs to this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        // Compare with the constant first so a null site tag yields false
        // instead of throwing a NullPointerException.
        return site.equals(crawlerRecord.tagsCreator().bizTags().site());
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // Intentionally empty: this script needs no post-processing after a
        // record executes; the override only satisfies the base-class contract.
    }

    @Override
    public String domain() {
        // Returns the constant "weibo" domain declared at the top of this class.
        return domain;
    }
}
