package com.chance.cc.crawler.development.scripts.weibo;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpConstant;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/1/25 13:57
 * @Description 微博 api定时采集 (Weibo API scheduled/cron crawling)
 **/
public class WeiboApiCronCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(WeiboApiCronCrawlerScript.class);

    /** Biz-tag key carrying the per-record download retry counter. */
    private static final String requestAgainTag = "weibo_api_request_retry";

    // URL regexes that route downloaded pages into this script.
    private static final String subscribeUrl = "https://c.api.weibo.com/subscribe/update_subscribe.json\\S*";
    private static final String subscribeDataUrl = "https://c.api.weibo.com/commercial/push\\S*";
    private static final String limitedDataUrl = "https://c.api.weibo.com/2/search/statuses/limited.json\\S*";

    // Templates for follow-up user-detail requests (%s = source, access_token, uid(s)).
    private static final String userTagsSourceUrl = "https://c.api.weibo.com/2/tags/tags_batch/other.json?source=%s&access_token=%s&uids=%s";
    private static final String userTagsUrl = "https://c.api.weibo.com/2/tags/tags_batch/other.json\\S*";

    private static final String userAgeSourceUrl = "https://c.api.weibo.com/2/users/birthday/other.json?source=%s&access_token=%s&uid=%s";
    private static final String userAgeUrl = "https://c.api.weibo.com/2/users/birthday/other.json\\S*";

    /**
     * Domain key this script is registered under.
     *
     * @return the crawler domain ("weibo")
     */
    @Override
    public String domain() {
        return "weibo";
    }

    /**
     * Registers the URL regexes whose pages are handled by this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(subscribeUrl);
        addUrlRegular(subscribeDataUrl);
        addUrlRegular(limitedDataUrl);
        addUrlRegular(userTagsUrl);
        addUrlRegular(userAgeUrl);
    }

    /**
     * Expands the trigger record using its support-source records: every support
     * record whose URL contains "keys" supplies keyword entries from which the
     * actual search requests are built.
     *
     * @param requestRecord        the triggering cron record
     * @param supportSourceRecords support records already downloaded for this run
     * @return the request records built from the keyword lists
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> crawlerRecords = new ArrayList<>();
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String supportRequestUrl = supportSourceRecord.getHttpRequest().getUrl();
            if (supportRequestUrl.contains("keys")) {
                initKeywordRecord(requestRecord, supportSourceRecord, crawlerRecords);
            }
        }
        return crawlerRecords;
    }

    /**
     * Input gate: only records whose "site" category tag starts with "apiCorn"
     * enter this script. (NOTE(review): "apiCorn" spelling kept as-is — existing
     * tag data may depend on it.)
     *
     * @param crawlerRequestRecord the candidate record
     * @return true when this script should process the record
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        // FIX: guard against a missing "site" tag instead of throwing NPE
        return site != null && site.startsWith("apiCorn");
    }

    /**
     * Parses a downloaded API page. Failed/503 downloads are retried; error
     * payloads are dropped; limited-search pages additionally schedule the next
     * result page plus one tags request and one birthday request per post author.
     *
     * @param crawlerRequestRecord the record that produced the page
     * @param httpPage             the downloaded page
     * @return follow-up request records (possibly empty)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        // Download failed (or rate-limited with 503): schedule a retry and skip washing.
        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() == 503) {
            log.error("weibo api url down load page is fail!will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        String rawText = httpPage.getRawText();
        if (requestUrl.matches(subscribeDataUrl)) {
            // commercial/push responses are wrapped so washSubscribeDataList can parseArray them
            httpPage.setRawText("[" + rawText + "]");
        } else if (rawText != null && rawText.startsWith("[")) {
            // FIX: the original tested requestUrl.startsWith("[") — never true for a URL —
            // so array-shaped responses fell through to parseObject below and failed.
            httpPage.setRawText(rawText.substring(1, rawText.length() - 1));
        } else {
            // Object-shaped response: an "error_code" field marks an API error page.
            JSONObject jsonObject = JSONObject.parseObject(rawText);
            String errorCode = jsonObject.getString("error_code");
            if (StringUtils.isNotEmpty(errorCode)) {
                log.error("requestUrl [{}] is error :[{}]!", requestUrl, jsonObject.getString("error"));
                return parsedLinks;
            }
        }

        if (requestUrl.matches(limitedDataUrl)) {
            // Schedule the next result page; this also copies the query params into extras.
            apiTurnPageRecord(crawlerRequestRecord, httpPage, parsedLinks);

            Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
            String source = (String) extras.get("source");
            String accessToken = (String) extras.get("access_token");
            // For every returned post, request the author's tags and birthday.
            List<String> statuses = httpPage.getJson().jsonPath($_type + ".statuses").all();
            if (statuses.isEmpty()) {
                log.error("limited weibo [{}] post is null !", requestUrl);
            }
            for (String data : statuses) {
                JSONObject post = JSONObject.parseObject(data);
                String uid = post.getJSONObject("user").getString("id");

                parsedLinks.add(buildUserInfoRecord(crawlerRequestRecord,
                        String.format(userTagsSourceUrl, source, accessToken, uid)));
                parsedLinks.add(buildUserInfoRecord(crawlerRequestRecord,
                        String.format(userAgeSourceUrl, source, accessToken, uid)));
            }
        }

        return parsedLinks;
    }

    /**
     * Builds a user-detail (tags/birthday) follow-up record for the given URL.
     * The retry counter copied from the parent's biz tags is removed so the new
     * request starts with a fresh retry budget.
     */
    private static CrawlerRequestRecord buildUserInfoRecord(CrawlerRequestRecord parent, String url) {
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(parent)
                .httpUrl(url)
                .copyBizTags()
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .resultLabelTag(article)
                .build();
        record.tagsCreator().bizTags().getCategoryTag().removeLabelTag(requestAgainTag);
        return record;
    }

    /**
     * Builds the next-page request for the limited-search API and stores the
     * current query parameters into the request's extras (read back by
     * {@link #parseLinks}). Stops paging once page * count reaches total_number.
     */
    private void apiTurnPageRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        String[] split = requestUrl.split("\\?");
        Map<String, Object> extras = new HashMap<>();
        if (split.length < 2) {
            // FIX: guard — a URL without a query string made split[1] throw below.
            // Still publish (empty) extras so downstream reads don't NPE.
            crawlerRequestRecord.getHttpRequest().setExtras(extras);
            log.error("limited url [{}] has no query string, skip paging", requestUrl);
            return;
        }

        StringBuilder nextPage = new StringBuilder(split[0]).append('?');
        int page = 0;
        int count = 0;
        List<NameValuePair> params = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair pair : params) {
            String name = pair.getName();
            String value = pair.getValue();
            if ("page".equals(name)) {
                // Advance the page number for the next request.
                page = Integer.parseInt(value);
                nextPage.append(name).append('=').append(page + 1).append('&');
            } else if ("count".equals(name)) {
                count = Integer.parseInt(value);
                nextPage.append(name).append('=').append(count).append('&');
            } else if ("q".equals(name)) {
                try {
                    // The search keyword must be re-encoded when rebuilding the URL.
                    nextPage.append(name).append('=').append(URLEncoder.encode(value, "UTF-8")).append('&');
                } catch (UnsupportedEncodingException e) {
                    // UTF-8 is mandated by the JVM spec; effectively unreachable.
                    log.error("failed to url-encode query value [{}]", value, e);
                }
            } else {
                nextPage.append(name).append('=').append(value).append('&');
            }
            extras.put(name, value);
        }
        crawlerRequestRecord.getHttpRequest().setExtras(extras);

        String totalNumber = httpPage.getJson().jsonPath($_type + ".total_number").get();
        // FIX: guard a missing total_number instead of throwing; treat it as "no more pages".
        if (StringUtils.isEmpty(totalNumber) || page * count >= Integer.parseInt(totalNumber)) {
            return;
        }
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextPage.substring(0, nextPage.length() - 1))
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .notFilterRecord()
                .needWashed(true)
                .resultLabelTag(article)
                .build();
        // NOTE(review): removes the retry counter via getKvTags() while other call
        // sites use removeLabelTag — presumably equivalent; confirm tag semantics.
        turnRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(requestAgainTag);
        parsedLinks.add(turnRecord);
    }

    /**
     * Washes a downloaded page into crawler data. Subscribe-push pages expand
     * into one data item per array element; everything else becomes one item.
     *
     * @param crawlerRecord the record that produced the page
     * @param page          the downloaded page
     * @return the washed data items (possibly empty)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        if (crawlerResultTags.hasDataType(article)) {
            if (requestUrl.matches(subscribeDataUrl)) {
                crawlerDataList.addAll(washSubscribeDataList(crawlerRecord, page));
            } else {
                crawlerDataList.add(washArticlePage(crawlerRecord, page));
            }
        }

        return crawlerDataList;
    }

    /** Wraps the raw page body as a single crawler data item. */
    private CrawlerData washArticlePage(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();

        return CrawlerData.builder()
                .data(requestRecord, httpPage)
                .url(itemUrl)
                .content(httpPage.getRawText())
                .build();
    }

    /**
     * Splits a subscribe-push page (wrapped into a JSON array by parseLinks)
     * into one crawler data item per element.
     */
    private List<CrawlerData> washSubscribeDataList(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();

        List<String> strings = JSONObject.parseArray(httpPage.getRawText(), String.class);
        // FIX: parseArray returns null for blank input — guard before the size check.
        if (strings == null || strings.isEmpty()) {
            log.error("subscribe data is null!");
            return crawlerDataList;
        }

        for (String string : strings) {
            JSONObject jsonObject = JSONObject.parseObject(string);

            CrawlerData crawlerData = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .url(itemUrl)
                    .content(jsonObject.toJSONString())
                    .build();

            crawlerDataList.add(crawlerData);
        }

        return crawlerDataList;
    }

    /**
     * Re-enqueues a failed download, up to 5 attempts. The attempt counter is
     * kept in the {@code requestAgainTag} KV tag and the record key is suffixed
     * with the attempt number so the retry is not deduplicated away.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(requestAgainTag)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(requestAgainTag).getVal();
            if (count >= 5) {
                // FIX: the format string had two {} placeholders but only one argument.
                log.error("Weibo search keyword download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(crawlerRecord.getHttpRequest().getUrl())
                .recordKey(crawlerRecord.getRecordKey() + count)
                .notFilterRecord()
                .copyBizTags()
                .releaseTime(System.currentTimeMillis())
                .resultLabelTag(article)
                .build();

        // Preserve method and body when retrying a POST request.
        String method = crawlerRecord.getHttpRequest().getMethod();
        if (HttpConstant.Method.POST.equals(method)) {
            HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
            httpRequest.setMethod(method);
            httpRequest.setRequestBody(crawlerRecord.getHttpRequest().getRequestBody());
        }

        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(requestAgainTag, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Builds one request record per keyword entry of the support-source page.
     * GET keywords become query-string URLs; POST keywords are sent as a JSON body.
     */
    private static void initKeywordRecord(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        String method = (String) extras.get("method");
        String requestUrl = requestRecord.getHttpRequest().getUrl();

        HttpPage supportHttpPage = supportSourceRecord.getInternalDownloadPage();
        List<String> content = supportHttpPage.getJson().jsonPath($_type + ".content").all();
        if (content.isEmpty()) {
            log.error("key is null !");
            return;
        }

        for (String data : content) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            JSONObject keyword = jsonObject.getJSONObject("keyword");
            // FIX: a missing "keyword" field previously aborted the whole loop with an NPE.
            if (keyword == null) {
                log.error("keyword is null for data [{}]", data);
                continue;
            }

            CrawlerRequestRecord build = CrawlerRequestRecord.builder()
                    .itemPageRequest(requestRecord)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .notFilterRecord()
                    .resultLabelTag(article)
                    .build();

            StringBuilder query = new StringBuilder(requestUrl).append('?');
            for (Map.Entry<String, Object> entry : keyword.entrySet()) {
                // FIX: the original omitted '=' between key and value, producing a malformed query.
                query.append(entry.getKey()).append('=').append(entry.getValue()).append('&');
            }
            // Drop the trailing '&' (or the '?' when the keyword map is empty).
            String record = query.substring(0, query.length() - 1);

            HttpRequest httpRequest = new HttpRequest();
            // FIX: equalsIgnoreCase is null-safe; method.toUpperCase() NPE'd on a missing extra.
            if (HttpConstant.Method.POST.equalsIgnoreCase(method)) {
                httpRequest.setUrl(requestUrl);
                httpRequest.setMethod(method);
                httpRequest.setRequestBody(HttpRequestBody.json(keyword.toJSONString(), "UTF-8"));
            } else {
                httpRequest.setUrl(record);
            }
            build.setRecordKey(record);
            build.setHttpRequest(httpRequest);

            crawlerRecords.add(build);
        }
    }

    /** No post-execution work is needed for this script. */
    @Override
    public void afterExecute(CrawlerRecordContext context) {

    }
}
