package com.chance.cc.crawler.development.scripts.weixin.api;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.tags.KVTag;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.concurrent.ThreadLocalRandom;

/**
 * WeChat (WeiXin) API crawler script.
 * <ul>
 *   <li>Updates WeChat article interaction counts (reads / "wow" counts,
 *       queried 3 days after publication).</li>
 *   <li>Collects daily WeChat articles, crawled per author/account.</li>
 * </ul>
 *
 * @author songding
 * @since 2021/10/17
 */
public class WeiXinApiCrawlerScript extends CrawlerCommonScript {
    // FIX: logger was previously bound to WeiXinApiSearchKWCrawlerScript,
    // misattributing every log line from this class.
    private static final Logger log = LoggerFactory.getLogger(WeiXinApiCrawlerScript.class);
    private static final String domain = "weixin";
    private static final String site = "api";

    // Endpoint (and its URL-match pattern) that returns read/"wow" counts for an
    // article uuid, queried 3 days after publication.
    private static final String articleRegulars = "https://api.newrank.cn/api/custom/ipsos/weixin/data/back\\S*";
    private static final String articleUrl = "https://api.newrank.cn/api/custom/ipsos/weixin/data/back";

    // Endpoint (and pattern) that returns an account's articles within a time
    // window; upstream refreshes the previous day's data after 14:00 each day.
    private static final String accountRegulars = "https://api.newrank.cn/api/custom/ipsos/weixin/daily/article\\S*";
    private static final String accountUrl = "https://api.newrank.cn/api/custom/ipsos/weixin/daily/article";

    // Work queue of pending keyword/account/uuid values harvested by
    // prepareRequest() and drained by getUUID(). Static state is shared across
    // script instances; synchronizedList guards against the framework invoking
    // parse hooks from multiple threads.
    private static final List<String> listUUID = Collections.synchronizedList(new ArrayList<>());

    // POST auth header: header name and API token.
    public static final String TOKEN_KEY = "key";
    public static final String TOKEN = "vf94a4bdb7b0e49bba2e5z6jt";

    @Override
    public void initUrlRegulars() {
        addUrlRegular(articleRegulars);
        addUrlRegular(accountRegulars);
    }

    /**
     * Harvests "keyword" values from supporting "keys" pages into the shared
     * {@link #listUUID} queue.
     * <p>
     * Always returns an empty list: this hook only accumulates state that
     * {@link #getAccount} / {@link #getArticle} consume later.
     *
     * @param crawlerRequestRecord the primary record (unused beyond the hook contract)
     * @param supportSourceRecords auxiliary records whose pages may carry keywords; may be null
     * @return an empty list (no direct follow-up links are produced here)
     */
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> prepareLinks = new ArrayList<>();
        if (supportSourceRecords == null) {
            return prepareLinks;
        }
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String urlSupport = supportSourceRecord.getHttpRequest().getUrl();
            if (!urlSupport.contains("keys")) {
                continue;
            }
            HttpPage page = supportSourceRecord.getInternalDownloadPage();
            String msg = page.getJson().jsonPath($_type + ".msg").get();
            // constant-first equals: a missing "msg" field must not NPE
            if (!"success".equals(msg)) {
                continue;
            }
            List<String> all = page.getJson().jsonPath($_type + ".content").all();
            for (String data : all) {
                JSONObject jsonObject = JSONObject.parseObject(data);
                String keyword = jsonObject.getString("keyword");
                // skip entries without a keyword so nulls never enter the queue
                if (keyword != null) {
                    listUUID.add(keyword);
                }
            }
        }
        return prepareLinks;
    }

    /**
     * Routes a downloaded page to the matching follow-up builder, or schedules
     * a retry when the download failed.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<>();
        if (page.getStatusCode() != 200 || !page.isDownloadSuccess()) {
            log.error("download error or page != 200 code={}", page.getStatusCode());
            this.recordAgainDownload(crawlerRecord, page, parseLinks);
            crawlerRecord.setNeedWashPage(false);
            return parseLinks;
        }
        String url = crawlerRecord.getHttpRequest().getUrl();
        if (url.matches(articleRegulars)) { // read/"wow" counts per article uuid
            this.getArticle(crawlerRecord, page, parseLinks);
        }
        if (url.matches(accountRegulars)) { // daily articles per account
            this.getAccount(crawlerRecord, page, parseLinks);
        }
        return parseLinks;
    }

    /**
     * Builds the next daily-article request. On the first pass (no "account"
     * tag yet) it dequeues a new account from {@link #listUUID}; otherwise it
     * pages through the current account until the API-reported page total
     * ({@code .data.pages}) is exhausted.
     */
    private void getAccount(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String startTime = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("startTime");
        String endTime = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("endTime");
        String accounts = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("account");
        if (!listUUID.isEmpty() && accounts == null) {
            String account = getUUID();
            if (account == null) { // another thread drained the queue between checks
                return;
            }
            parseLinks.add(buildAccountRequest(crawlerRecord, account, startTime, endTime, 1));
        } else {
            KVTag key = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("pages");
            if (key == null) { // defensive: no paging tag means nothing to continue
                return;
            }
            Integer pages = (Integer) key.getVal();
            String totals = page.getJson().jsonPath($_type + ".data.pages").get();
            if (totals == null || pages > Integer.parseInt(totals)) {
                return; // all pages for this account have been requested
            }
            parseLinks.add(buildAccountRequest(crawlerRecord, accounts, startTime, endTime, pages + 1));
        }
    }

    /**
     * Assembles one POST request against the daily-article endpoint.
     * NOTE(review): the page number is only tracked in the "pages" biz tag,
     * never sent in the form body — confirm the framework injects it, otherwise
     * every request fetches the same page.
     *
     * @param nextPage value stored in the "pages" tag of the new request
     */
    private CrawlerRequestRecord buildAccountRequest(CrawlerRequestRecord crawlerRecord, String account,
                                                     String startTime, String endTime, int nextPage) {
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(accountUrl)
                .releaseTime(System.currentTimeMillis())
                .httpHead(TOKEN_KEY, TOKEN)
                .copyBizTags()
                .copyResultTags()
                .build();
        Map<String, Object> map = new HashMap<>();
        map.put("account", account);
        map.put("from", startTime);
        map.put("to", endTime);
        map.put("size", 50);
        HttpRequestBody body = HttpRequestBody.form(map, "utf-8");
        record.getHttpRequest().setMethod("post");
        record.getHttpRequest().setRequestBody(body);
        record.tagsCreator().bizTags().addCustomKV("account", account);
        record.tagsCreator().bizTags().addCustomKV("pages", nextPage);
        return record;
    }

    /**
     * Dequeues and returns the next pending keyword/account/uuid, or
     * {@code null} when the queue is empty. Synchronized so the empty-check
     * and removal are atomic.
     */
    public String getUUID() {
        synchronized (listUUID) {
            return listUUID.isEmpty() ? null : listUUID.remove(0);
        }
    }

    /** Builds one POST request querying interaction data for the next queued uuid. */
    private void getArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String uuid = getUUID();
        if (uuid == null) { // nothing queued
            return;
        }
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(articleUrl)
                .releaseTime(System.currentTimeMillis())
                .httpHead(TOKEN_KEY, TOKEN)
                .copyBizTags()
                .copyResultTags()
                .build();
        Map<String, Object> map = new HashMap<>();
        map.put("uuid", uuid);
        HttpRequestBody body = HttpRequestBody.form(map, "utf-8");
        record.getHttpRequest().setMethod("post");
        record.getHttpRequest().setRequestBody(body);
        parseLinks.add(record);
    }

    /** Routes a downloaded page to the matching wash (data-extraction) handler. */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> dataList = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        if (url.matches(articleRegulars)) {
            this.washArticle(crawlerRecord, page, dataList);
        }
        if (url.matches(accountRegulars)) {
            this.washAccount(crawlerRecord, page, dataList);
        }
        return dataList;
    }

    private void washAccount(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        washData(crawlerRecord, page, dataList);
    }

    private void washArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        washData(crawlerRecord, page, dataList);
    }

    /**
     * Shared extraction for both endpoints: converts every entry of
     * {@code .data.list} into a {@link CrawlerData} row. Kept best-effort like
     * the original: a non-zero response code is logged but parsing still runs.
     */
    private void washData(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> dataList) {
        String code = page.getJson().jsonPath($_type + ".code").get();
        if (!"0".equals(code)) {
            log.error("get data error code=[{}]", code);
        }
        List<String> all = page.getJson().jsonPath($_type + ".data.list").all();
        for (String str : all) {
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    // ThreadLocalRandom avoids allocating a new Random per row
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(),
                            CrawlerEnum.CrawlerDataType.article,
                            ThreadLocalRandom.current().nextInt(1000000)))
                    .url(crawlerRecord.getHttpRequest().getUrl())
                    .releaseTime(System.currentTimeMillis())
                    .content(str)
                    .build();
            dataList.add(crawlerData);
        }
    }

    /**
     * Re-enqueues a failed download for retry, tracking the attempt number in
     * the "count" biz tag and giving up after 10 attempts.
     */
    private void recordAgainDownload(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        int retryCount;
        String count = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("count");
        if (count == null) {
            retryCount = 1; // first retry
        } else {
            retryCount = Integer.parseInt(count);
            if (retryCount >= 10) {
                log.error("The number of downloads exceeds the limit");
                return;
            }
            retryCount += 1;
        }
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        // distinct record key per attempt so the retry is not deduplicated away
        record.setRecordKey(crawlerRecord.getHttpRequest().getUrl() + retryCount);
        record.tagsCreator().bizTags().addCustomKV("count", retryCount);
        parseLinks.add(record);
    }

    /** This script handles records tagged with site "api". */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        // constant-first equals: a record with a null site tag must not NPE
        return site.equals(crawlerRecord.tagsCreator().bizTags().site());
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // no post-processing required for this script
    }

    @Override
    public String domain() {
        return domain;
    }
}
