package com.chance.cc.crawler.development.scripts.weibo.weiboapi;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.tags.KVTag;
import net.lingala.zip4j.core.ZipFile;
import net.lingala.zip4j.exception.ZipException;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.DigestUtils;

import java.io.*;
import java.text.ParseException;
import java.util.*;

/**
 * Crawler script for the Weibo commercial API: creates historical keyword-search
 * tasks, polls their status, downloads and unzips the password-protected result
 * archive, then washes articles, authors and interaction counts into crawler data.
 *
 * @author songding
 * @since 2021/11/25
 */
public class WBApiNewCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(WBApiNewCrawlerScript.class);

    public static final String domain = "weibo";
    public static final String site = "wbApi";

    /** Tag key carrying the raw JSON result of the initial record. */
    private static final String DOMAIN_RESULT_JSON_RECORD_TAG = "domain_result_json";
    /** When this tag is present on a record, the retry counter below is not applied. */
    private static final String check_the_retry = "check_the_retry";
    // NOTE(review): hard-coded API credential — should be externalized to configuration.
    private static final String access_token = "2.00o4_w1HrAaeYBedf38e38b8SnITmD";
    /** Shared monitor serializing the five-minute check-retry waits across threads. */
    private static final Object obj = new Object();
    /** Shared RNG for synthetic interaction data ids (avoids a new Random() per row). */
    private static final Random RANDOM = new Random();
    /** Maximum number of mids per interaction-count request (API batch limit). */
    private static final int INTERACTION_BATCH_SIZE = 99;
    /** Maximum number of uids per author-profile request (API batch limit). */
    private static final int AUTHOR_BATCH_SIZE = 49;

    private static final String weiboUrl = "https://weibo.com/";
    // Historical search (by keyword or author uid): create task -> check status -> download archive.
    private static final String createUrl = "https://c.api.weibo.com/2/search/statuses/historical/create.json";
    private static final String checkUrlRegulars = "https://c.api.weibo.com/2/search/statuses/historical/check\\S*";
    private static final String checkUrl = "https://c.api.weibo.com/2/search/statuses/historical/check.json?task_id=%s&timestamp=%s&signature=%s&access_token=%s";
    private static final String downloadUrlRegulars = "https://c.api.weibo.com/2/search/statuses/historical/download\\S*";
    private static final String downloadUrl = "https://c.api.weibo.com/2/search/statuses/historical/download.json?task_id=%s&timestamp=%s&signature=%s&access_token=%s";

    private static final String authorUrl = "https://c.api.weibo.com/2/users/show_batch/other.json?uids=%s&access_token=%s";
    private static final String authorRegulars = "https://c.api.weibo.com/2/users/show_batch/other\\S*";

    private static final String interactionUrl = "https://c.api.weibo.com/2/statuses/count/biz.json?ids=%s&access_token=%s";
    private static final String interactionRegulars = "https://c.api.weibo.com/2/statuses/count/\\S*";

    @Override
    public void initUrlRegulars() {
        addUrlRegular(createUrl);
        addUrlRegular(checkUrlRegulars);
        addUrlRegular(checkUrl);
        addUrlRegular(downloadUrlRegulars);
        addUrlRegular(downloadUrl);
        addUrlRegular(authorRegulars);
        addUrlRegular(interactionRegulars);
    }

    /**
     * Builds the initial "create historical search task" requests from the keyword
     * support records attached to the seed request. Each support record whose URL
     * contains "keys" contributes its keywords (up to 1000 per create request).
     */
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> prepareLinks = new ArrayList<>();
        if (supportSourceRecords == null) {
            return prepareLinks;
        }
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (!url.contains("keys")) {
                continue;
            }
            HttpPage page = supportSourceRecord.getInternalDownloadPage();
            String msg = page.getJson().jsonPath($_type + ".msg").get();
            // constant-first equals: msg may be absent from the support payload
            if (!"success".equals(msg)) {
                continue;
            }
            List<String> all = page.getJson().jsonPath($_type + ".content").all();
            StringBuilder keywords = new StringBuilder();
            for (String data : all) {
                JSONObject jsonObject = JSONObject.parseObject(data);
                String keyword = jsonObject.getString("keyword");
                keywords.append(keyword).append(",");
            }
            if (requestUrl.matches(createUrl)) {
                this.getCreateUrl(crawlerRequestRecord, prepareLinks, keywords);
            }
        }
        return prepareLinks;
    }

    /**
     * Emits the POST request that creates a historical-search task covering the
     * starttime/endtime range carried on the seed record's biz tags.
     */
    private void getCreateUrl(CrawlerRequestRecord crawlerRecord, List<CrawlerRecord> prepareLinks, StringBuilder keyword) {
        KVTag startKey = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("starttime");
        Long starttime = Long.valueOf(String.valueOf(startKey.getVal()));
        KVTag endKey = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("endtime");
        Long endtime = Long.valueOf(String.valueOf(endKey.getVal()));
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(createUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        record.getHttpRequest().setMethod("post");
        Map<String, Object> map = new HashMap<>();
        // strip the trailing comma left by the keyword accumulation
        map.put("q", trimTrailingComma(keyword));
        map.put("starttime", starttime);
        map.put("endtime", endtime);
        map.put("access_token", access_token);
        HttpRequestBody form = HttpRequestBody.form(map, "utf-8");
        record.getHttpRequest().setRequestBody(form);
        prepareLinks.add(record);
    }

    /** Removes a single trailing comma from the accumulator, if present. */
    private static String trimTrailingComma(StringBuilder sb) {
        int len = sb.length();
        if (len > 0 && sb.charAt(len - 1) == ',') {
            return sb.substring(0, len - 1);
        }
        return sb.toString();
    }

    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        if (page.getStatusCode() != 200 || !page.isDownloadSuccess()) {
            log.error("download error or page != 200  code={}", page.getStatusCode());
            this.recordAgainDownload(crawlerRecord, page, parseLinks);
            crawlerRecord.setNeedWashPage(false);
            return parseLinks;
        }

        if (url.matches(createUrl)) {
            // task created: schedule the first status check
            this.parseCheck(crawlerRecord, page, parseLinks);
        }
        if (url.matches(checkUrlRegulars)) {
            // poll whether the historical task has finished
            this.parseCheckUrl(crawlerRecord, page, parseLinks);
        }
        if (url.matches(downloadUrlRegulars)) {
            // task finished: fetch the archive, unzip it, and fan out follow-up requests
            this.parseDownload(crawlerRecord, page, parseLinks);
        }
        return parseLinks;
    }

    /**
     * Handles the downloaded result archive: persists the zip, extracts it with the
     * task-specific password (task_id + secret_key), loads the contained log file as
     * JSON onto the page, then fans out one comment request per blog and batched
     * interaction-count requests. Aborts early (and suppresses washing) on any
     * write / unzip / read failure instead of continuing with missing data.
     */
    private void parseDownload(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        // NOTE(review): Windows-specific path; switch to the commented Linux path for deployment.
        //String path = "/data/chance_crawler_test/data/weibo/weiboBiz/";
        String path = "F:\\chance_log\\weibo\\";
        String task_id = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("task_id");
        String secret_key = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("secret_key");
        String filePath = path + task_id + secret_key;
        // persist the raw archive; try-with-resources closes the stream (the original leaked it)
        try (OutputStream out = new FileOutputStream(filePath + ".zip")) {
            IOUtils.write(page.getBytes(), out);
        } catch (IOException e) {
            log.error("failed to write result archive {}: {}", filePath, e.getMessage(), e);
            crawlerRecord.setNeedWashPage(false);
            return;
        }
        // extract; the archive is password-protected with task_id + secret_key
        try {
            ZipFile zipFile = new ZipFile(filePath + ".zip");
            zipFile.setPassword(task_id + secret_key);
            zipFile.extractAll(filePath);
        } catch (ZipException e) {
            log.error("failed to extract result archive {}: {}", filePath, e.getMessage(), e);
            crawlerRecord.setNeedWashPage(false);
            return;
        }
        // read the extracted log file and expose it as the page JSON
        // NOTE(review): platform-default charset kept from the original — confirm the log encoding.
        try (InputStreamReader reader = new InputStreamReader(
                new FileInputStream(new File(filePath + "\\" + task_id + ".log")))) {
            List<String> strings = IOUtils.readLines(reader);
            page.setJson(new Json(JSONObject.toJSONString(strings)));
        } catch (Exception e) {
            log.error("failed to read extracted log for task {}: {}", task_id, e.getMessage(), e);
            crawlerRecord.setNeedWashPage(false);
            return;
        }
        KVTag startKey = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("starttime");
        Long starttime = Long.valueOf(String.valueOf(startKey.getVal()));
        // fan out: one comment request per blog, mids batched for the interaction API
        List list = page.getJson().toObject(List.class);
        StringBuilder mids = new StringBuilder();
        int batched = 0;
        for (Object item : list) {
            JSONObject jsonObject = JSONObject.parseObject(String.valueOf(item));
            String mid = jsonObject.getString("mid");
            // fixed: the original looked up "blog_url " (trailing space) and always got null
            String blog_url = jsonObject.getString("blog_url");
            this.getCommentUrl(mid, blog_url, parseLinks, crawlerRecord, starttime);
            mids.append(mid).append(",");
            batched++;
            if (batched >= INTERACTION_BATCH_SIZE) {
                this.getInteraction(crawlerRecord, parseLinks, mids);
                mids = new StringBuilder();
                batched = 0;
            }
        }
        // flush the final partial batch (the original's counter logic could drop it)
        if (batched > 0) {
            this.getInteraction(crawlerRecord, parseLinks, mids);
        }
    }

    /**
     * Emits one interaction-count request for the accumulated comma-separated mids.
     */
    private void getInteraction(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> parseLinks, StringBuilder sb) {
        // fixed: the original called sb.substring(...) and discarded the result,
        // so the ids parameter was sent with a trailing comma
        String ids = trimTrailingComma(sb);
        String url = String.format(interactionUrl, ids, access_token);
        CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parseLinks.add(interactionRecord);
    }

    /**
     * Emits batched author-profile requests, at most {@link #AUTHOR_BATCH_SIZE}
     * uids per request, draining the given set. Currently not wired into
     * parseLinks (the author follow-up is disabled).
     */
    private void getAuthorUrl(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> parseLinks, Set set) {
        synchronized (obj) {
            Iterator iterator = set.iterator();
            StringBuilder uids = new StringBuilder();
            int batched = 0;
            while (iterator.hasNext()) {
                uids.append(iterator.next()).append(",");
                iterator.remove();
                batched++;
                if (batched >= AUTHOR_BATCH_SIZE || !iterator.hasNext()) {
                    // fixed: the original called substring(...) and discarded the result
                    String joined = trimTrailingComma(uids);
                    String url = String.format(authorUrl, joined, access_token);
                    CrawlerRequestRecord authorRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRecord)
                            .httpUrl(url)
                            .copyBizTags()
                            .releaseTime(System.currentTimeMillis())
                            .copyResultTags()
                            .build();
                    parseLinks.add(authorRecord);
                    uids = new StringBuilder();
                    batched = 0;
                }
            }
        }
    }

    /**
     * Emits a comment-crawl request for one blog, re-tagged onto the "tie" site
     * so the comment script picks it up.
     */
    private void getCommentUrl(String mid, String blog_url, List<CrawlerRequestRecord> parseLinks, CrawlerRequestRecord crawlerRecord, Long starttime) {
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(blog_url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        commentRecord.tagsCreator().bizTags().addSite("tie");
        commentRecord.tagsCreator().bizTags().addCustomKV("mid", mid);
        parseLinks.add(commentRecord);
    }

    /**
     * Handles a task-status response. If the task is not ready, waits five minutes
     * and schedules another check request; otherwise schedules the archive download
     * (response read as GBK).
     */
    private void parseCheckUrl(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String status = page.getJson().jsonPath($_type + ".status").get();
        String id = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("id");
        String task_id = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("task_id");
        String secret_key = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("secret_key");
        // constant-first equals: a missing status field means "not ready yet"
        if (!"true".equals(status)) {
            log.info("The task is not created,Wait 5 minutes and try again");
            try {
                // NOTE(review): sleeping while holding the shared monitor serializes every
                // pending check across threads — kept as in the original; confirm intent.
                synchronized (obj) {
                    Thread.sleep(60000L * 5);
                    long time = System.currentTimeMillis();
                    String signature = DigestUtils.md5DigestAsHex((id + secret_key + time).getBytes());
                    String url = String.format(checkUrl, task_id, time, signature, access_token);
                    CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRecord)
                            .httpUrl(url)
                            .releaseTime(System.currentTimeMillis())
                            .copyBizTags()
                            .copyResultTags()
                            .build();
                    record.tagsCreator().bizTags().addCustomKV("id", id);
                    record.tagsCreator().bizTags().addCustomKV("task_id", task_id);
                    record.tagsCreator().bizTags().addCustomKV("secret_key", secret_key);
                    parseLinks.add(record);
                    crawlerRecord.setNeedWashPage(false);
                    return;
                }
            } catch (InterruptedException e) {
                // restore the interrupt flag instead of swallowing it (original printStackTrace'd)
                Thread.currentThread().interrupt();
                log.warn("check retry wait interrupted", e);
            }
        }
        long time = System.currentTimeMillis();
        String url = String.format(downloadUrl, task_id, time, DigestUtils.md5DigestAsHex((id + secret_key + time).getBytes()), access_token);
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        record.getHttpRequest().setResponseCharset("GBK");
        parseLinks.add(record);
    }

    /**
     * Handles a successful create-task response: extracts the creator id, task id
     * and secret key, signs the first status-check URL (md5 of id+secret_key+ts)
     * and schedules it, carrying the credentials forward as biz tags.
     */
    private void parseCheck(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String id = page.getJson().jsonPath($_type + ".id").get();               // task creator id
        String q = page.getJson().jsonPath($_type + ".q").get();                 // keywords
        String task_id = page.getJson().jsonPath($_type + ".task_id").get();     // task id
        String secret_key = page.getJson().jsonPath($_type + ".secret_key").get(); // key for check/download
        long time = System.currentTimeMillis();
        String signature = DigestUtils.md5DigestAsHex((id + secret_key + time).getBytes());
        String url = String.format(checkUrl, task_id, time, signature, access_token);
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        record.tagsCreator().bizTags().addCustomKV("id", id);
        record.tagsCreator().bizTags().addCustomKV("task_id", task_id);
        record.tagsCreator().bizTags().addCustomKV("secret_key", secret_key);
        parseLinks.add(record);
    }

    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> dataList = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        if (url.matches(downloadUrlRegulars)) {
            this.washArticle(crawlerRecord, page, dataList);
        }
        if (url.matches(authorRegulars)) {
            this.washAuthor(crawlerRecord, page, dataList);
        }
        if (url.matches(interactionRegulars)) {
            this.washInteraction(crawlerRecord, page, dataList);
        }
        return dataList;
    }

    /**
     * Washes an interaction-count response: one CrawlerData per entry, with a
     * random dataId and the article it belongs to as parentId.
     */
    private void washInteraction(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        List list = page.getJson().toObject(List.class);
        for (Object item : list) {
            JSONObject jsonObject = JSONObject.parseObject(String.valueOf(item));
            String id = jsonObject.getString("id");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction, RANDOM.nextInt(100000000)))
                    .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, id))
                    .content(String.valueOf(item))
                    .releaseTime(System.currentTimeMillis())
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .build();
            crawlerData.tagsCreator().bizTags().addSiteBiz("interaction");
            dataList.add(crawlerData);
        }
    }

    /**
     * Washes an author-profile response: one CrawlerData per user, keyed by uid
     * and linked to the user's Weibo profile page.
     */
    private void washAuthor(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        List<String> all = page.getJson().jsonPath($_type + ".users").all();
        for (String str : all) {
            JSONObject jsonObject = JSONObject.parseObject(str);
            String id = jsonObject.getString("id");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, id))
                    .url("https://weibo.com/u/" + id)
                    .content(str)
                    .releaseTime(System.currentTimeMillis())
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .build();
            crawlerData.tagsCreator().bizTags().addSiteBiz("author");
            dataList.add(crawlerData);
        }
    }

    /**
     * Washes a downloaded-archive response: one CrawlerData per blog entry, keyed
     * by mid with the author's uid as parentId; release time parsed from
     * "created_at" (yyyy-MM-dd HH:mm:ss), falling back to 0 on parse failure.
     */
    private void washArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        List list = page.getJson().toObject(List.class);
        for (Object item : list) {
            JSONObject jsonObject = JSONObject.parseObject(String.valueOf(item));
            String mid = jsonObject.getString("mid");
            String uid = jsonObject.getString("uid");
            String created_at = jsonObject.getString("created_at");
            long releaseTime = 0L;  // fixed: primitive with uppercase literal (was "Long ... = 0l")
            try {
                releaseTime = DateUtils.parseDate(created_at, "yyyy-MM-dd HH:mm:ss").getTime();
            } catch (ParseException e) {
                log.error("unparseable created_at '{}' for mid {}", created_at, mid, e);
            }
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, mid))
                    .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerEnum.CrawlerDataType.article, uid))
                    .content(String.valueOf(item))
                    .releaseTime(releaseTime)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .build();
            crawlerData.tagsCreator().bizTags().addSiteBiz("article");
            dataList.add(crawlerData);
        }
    }

    /**
     * Re-queues a failed download. Unless the record carries the check_the_retry
     * tag, an attempt count is tracked in the "count" biz tag and the retry is
     * abandoned after 10 attempts.
     */
    private void recordAgainDownload(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        Integer integerCount = 0;
        if (!crawlerRecord.tagsCreator().bizTags().hasKVTag(check_the_retry)) {
            String count = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("count");
            if (count == null) {
                integerCount = 1;
            } else {
                integerCount = Integer.valueOf(count);
                if (integerCount >= 10) {
                    log.error("The number of downloads exceeds the limit");
                    return;
                }
                integerCount += 1;
            }
        }

        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        if (!crawlerRecord.tagsCreator().bizTags().hasKVTag(check_the_retry)) {
            // unique record key per attempt so the retry is not deduplicated away
            record.setRecordKey(crawlerRecord.getHttpRequest().getUrl() + integerCount);
            record.tagsCreator().bizTags().addCustomKV("count", integerCount);
        }
        parseLinks.add(record);
    }

    /** Accepts only records tagged with this script's site; null-safe. */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        // constant-first equals avoids an NPE when the record carries no site tag
        return site.equals(crawlerRecord.tagsCreator().bizTags().site());
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // intentionally empty: no post-execution cleanup required for this script
    }

    @Override
    public String domain() {
        return domain;
    }
}
