package com.chance.cc.crawler.development.scripts.weibo.self;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.ParseException;
import java.time.LocalDate;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static org.apache.commons.lang3.time.DateUtils.MILLIS_PER_MINUTE;
import static org.apache.commons.lang3.time.DateUtils.MILLIS_PER_SECOND;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2020/12/28 13:50
 * @Description Weibo keyword-search crawler script
 **/
public class WeiboSearchKwCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(WeiboSearchKwCrawlerScript.class);
    private static final String DOMAIN = "weibo";
    private static final String SITE = "searchKw";
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";
    private static final String DOMAIN_RESULT_JSON_RECORD_TAG = "domain_result_json"; // result field of the initial record

    private static final String PREFIX = "https://s.weibo.com";
    private static final String ENTRANCE_URL = "https://s.weibo.com/\\?Refer=";
    private static final String SEARCH_URL = "https://s.weibo.com/weibo\\?q=\\S*";

    /** Maximum number of re-download attempts for a failed page. */
    private static final int MAX_RETRY_COUNT = 5;

    // Round-robin pools refreshed from support-source records.
    // All reads/writes are guarded by cookieLock / userAgentLock respectively.
    private final List<String> cookies = new ArrayList<>();
    private final List<String> userAgents = new ArrayList<>();

    private final AtomicInteger cookieIndex = new AtomicInteger(0);
    private final AtomicInteger userAgentIndex = new AtomicInteger(0);
    private final Object cookieLock = new Object();
    private final Object userAgentLock = new Object();

    private static final String DEFAULT_USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36";
    private static final String DEFAULT_COOKIE = "SUB=_2A25Nj-B8DeRhGeBO71YR-CzMwz2IHXVu_Va0rDV8PUNbmtANLXD2kW9NShYOQisp_anjAzCjh0OQ2nBhYy3_WZRn";


    /**
     * Domain this script handles.
     *
     * @return the crawler domain identifier
     */
    @Override
    public String domain() {
        return "weibo";
    }

    /**
     * URL patterns that route requests into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(ENTRANCE_URL);
        addUrlRegular(SEARCH_URL);
    }

    /**
     * Input validation: only records tagged with site {@code searchKw} enter this script.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return true when the record's "site" business tag matches this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        return SITE.equals(site);
    }

    /**
     * Prepares the outgoing request: loads keyword / cookie / user-agent support data,
     * expands one search request per keyword, and attaches Cookie and User-Agent headers
     * (falling back to built-in defaults when the pools are empty).
     *
     * @param requestRecord        the seed request record
     * @param supportSourceRecords auxiliary records carrying keywords, cookies and user agents
     * @return the per-keyword search request records
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> allItemRecords = new ArrayList<>();

        if (supportSourceRecords != null && supportSourceRecords.size() > 0) {
            CrawlerRequestRecord keywordSupportSourceRecord = null;
            for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
                String supportUrl = supportSourceRecord.getHttpRequest().getUrl();
                if (supportUrl.contains("keys")) {
                    keywordSupportSourceRecord = supportSourceRecord;
                } else if (supportUrl.contains("userOauthInfos")) {
                    initCookieMap(supportSourceRecord);
                } else if ("https://fake-useragent.herokuapp.com/browsers/0.1.11".equals(supportUrl)) {
                    initUserAgents(supportSourceRecord);
                }
            }

            if (keywordSupportSourceRecord != null) {
                initKeywordCrawlerRecord(requestRecord, keywordSupportSourceRecord, allItemRecords);
            } else {
                log.error("weibo search keywords cannot be empty!");
            }
        }

        String userAgent = getOneUserAgent();
        if (StringUtils.isBlank(userAgent)) {
            userAgent = DEFAULT_USER_AGENT;
        }
        String cookie = getOneCookie();
        if (StringUtils.isBlank(cookie)) {
            cookie = DEFAULT_COOKIE;
        }
        requestRecord.getHttpRequest().addHeader("Cookie", cookie);
        requestRecord.getHttpRequest().addHeader("User-Agent", userAgent);
        return allItemRecords;
    }

    /**
     * Parses follow-up links from a downloaded page. Failed downloads (non-200/404)
     * are re-queued for retry instead of being parsed.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return next-page and item-detail request records
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        if (!httpPage.isDownloadSuccess() || (httpPage.getStatusCode() != 200 && httpPage.getStatusCode() != 404)) {
            // Download failed: schedule the same URL again and skip washing this page.
            log.warn("{} status code : [{}],will retry!", requestUrl, httpPage.getStatusCode());
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        if (requestUrl.matches(SEARCH_URL)) {
            searchUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Extracts the next-page link and item detail links from a search result page.
     * An empty-result marker ("card-no-result") on the first attempt is treated as a
     * soft block and retried; on a retried record it is accepted as a genuine empty result.
     */
    private void searchUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Did the keyword search return any results?
        String result = httpPage.getHtml().xpath("//div[contains(@class,'card-no-result')]").get();
        if (StringUtils.isNotBlank(result)) {
            if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag(REQUEST_AGAIN_TAG)) {
                // First sight of "no result" may be an anti-crawl block, so retry once more.
                log.warn("weibo search download is failed ,will retry!");
                requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            } else {
                log.warn("weibo search keyword result is null!url is:{}", httpPage.getRequest().getUrl());
                crawlerRequestRecord.setNeedParsedPage(false);
            }
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        // Next-page link.
        String nextPageUrl = httpPage.getHtml().xpath("//a[@class=\"next\"]/@href").get();
        if (StringUtils.isNotEmpty(nextPageUrl)) {
            // hrefs are HTML-escaped and relative; unescape and prepend the site prefix.
            nextPageUrl = PREFIX + StringEscapeUtils.unescapeHtml(nextPageUrl);
            CrawlerRequestRecord nextCrawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .needWashed(true)
                    .copyResultTags()
                    .build();
            parsedLinks.add(nextCrawlerRequestRecord);
        }

        // Item detail links.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@id=\"pl_feedlist_index\"]//div[@class=\"card-wrap\"]").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath(".//div[@class=\"content\"]/p[@class=\"from\"]/a[1]/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }
            itemUrl = "https:" + itemUrl;

            String releaseTime = node.xpath(".//div[@class=\"content\"]/p[@class=\"from\"]/a[1]/text()").get();
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }
            // The timestamp text may carry a trailing repost/like counter; strip it.
            if (releaseTime.contains("转赞人数")) {
                releaseTime = releaseTime.substring(0, releaseTime.indexOf("转赞人数"));
            }

            try {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(articleReleaseTime(releaseTime.trim()))
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }
    }

    /**
     * Washes a detail page into article and/or interaction data, depending on the
     * record's result tags. Null wash results (pages without a release time) are skipped
     * instead of being added to the output list.
     *
     * @param crawlerRecord the request record being washed
     * @param page          the downloaded detail page
     * @return the washed data items (possibly empty, never containing null)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        if (crawlerResultTags.hasDataType(article)) {
            CrawlerData articleData = washArticle(crawlerRecord, page);
            if (articleData != null) {
                crawlerDataList.add(articleData);
            }
        }
        if (crawlerResultTags.hasDataType(interaction)) {
            CrawlerData interactionData = washInteraction(crawlerRecord, page);
            if (interactionData != null) {
                crawlerDataList.add(interactionData);
            }
        }
        return crawlerDataList;
    }

    /**
     * Extracts the article (author, content, release time) from a detail page.
     *
     * @return the article data, or null when no release time can be located on the page
     */
    public CrawlerData washArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // NOTE(review): a previous revision re-walked the search-result card list here,
        // computing values that were never used and crashing on hrefs without a query
        // string (lastIndexOf("?") == -1); that dead loop has been removed.
        String articleKey = extractArticleKey(itemUrl);

        String author = httpPage.getHtml().xpath("//a[@class=\"W_f14 W_fb S_txt1\"]/text()").get();

        List<String> textList = httpPage.getHtml().xpath("//div[@class=\"WB_text W_f14\"]//text()").all();
        StringBuilder content = new StringBuilder();
        for (String text : textList) {
            if (StringUtils.isEmpty(text)) {
                continue;
            }
            content.append(text).append(" ");
        }

        String releaseTime = httpPage.getHtml().xpath("//div[@node-type=\"follow_recommend_box\"]/preceding-sibling::div/a[@node-type=\"feed_list_item_date\"]/text()").get();
        if (StringUtils.isEmpty(releaseTime)) {
            return null;
        }

        CrawlerData article = null;
        try {
            article = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .url(itemUrl)
                    .releaseTime(articleReleaseTime(releaseTime.trim()))
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Content, content.toString().trim())
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return article;
    }

    /**
     * Extracts interaction counters (forwards, comments, likes) from a detail page.
     * The page shows the button label ("转发"/"评论"/"赞") instead of a number when the
     * counter is zero, so those labels map to "0".
     *
     * @return the interaction data, or null when no release time can be located
     */
    public CrawlerData washInteraction(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = extractArticleKey(itemUrl);

        String forwards = httpPage.getHtml().xpath("//span[@node-type=\"forward_btn_text\"]//em[@class=\"W_ficon ficon_forward S_ficon\"]/following-sibling::em").get();
        forwards = "转发".equals(forwards) ? "0" : forwards;
        String comments = httpPage.getHtml().xpath("//span[@node-type=\"comment_btn_text\"]//em[@class=\"W_ficon ficon_repeat S_ficon\"]/following-sibling::em").get();
        comments = "评论".equals(comments) ? "0" : comments;
        String likes = httpPage.getHtml().xpath("//ul[@class=\"WB_row_line WB_row_r4 clearfix S_line2\"]//span[@node-type=\"like_status\"]/em[@class=\"W_ficon ficon_praised S_txt2\"]/following-sibling::em").get();
        likes = "赞".equals(likes) ? "0" : likes;

        String releaseTime = httpPage.getHtml().xpath("//div[@node-type=\"follow_recommend_box\"]/preceding-sibling::div/a[@node-type=\"feed_list_item_date\"]/text()").get();
        if (StringUtils.isEmpty(releaseTime)) {
            return null;
        }
        CrawlerData interaction = null;
        try {
            interaction = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", requestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                    .url(itemUrl)
                    .releaseTime(articleReleaseTime(releaseTime.trim()))
                    .addContentKV(Field_I_Forwards, forwards)
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Comments, comments)
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return interaction;
    }

    /**
     * Derives the article key (status id) from an item URL: the segment after the last
     * "/" with any query string stripped. Safe for URLs that carry no "?".
     */
    private static String extractArticleKey(String url) {
        return url.contains("?")
                ? url.substring(url.lastIndexOf("/") + 1, url.lastIndexOf("?"))
                : url.substring(url.lastIndexOf("/") + 1);
    }

    /**
     * Expands the keyword support record into one search request per keyword.
     * The search URL template is read from the request's {@code searchKwSourceUrl} extra
     * and formatted with the URL-encoded keyword.
     */
    private void initKeywordCrawlerRecord(CrawlerRequestRecord requestRecord, CrawlerRequestRecord keywordSupportSourceRecord, List<CrawlerRecord> allItemRecords) {
        HttpPage internalDownloadPage = keywordSupportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        String searchSourceUrl = (String) extras.get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchSourceUrl)) {
            log.error("search kw source url can not null!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");

            try {
                String url = String.format(searchSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(requestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                turnRecord.tagsCreator().bizTags().addKeywords(keyword);
                allItemRecords.add(turnRecord);
            } catch (UnsupportedEncodingException e) {
                log.error(e.getMessage());
            }
        }
    }

    /**
     * Loads the user-agent pool from the fake-useragent support record.
     * The payload maps browser names to lists of UA strings; all lists are flattened
     * into the shared pool. Synchronized so the round-robin getter never observes a
     * concurrent mutation.
     */
    private void initUserAgents(CrawlerRequestRecord supportSourceRecord) {
        try {
            HttpPage httpPage = supportSourceRecord.getInternalDownloadPage();
            Json rawText = new Json(httpPage.getRawText());
            String browsers = rawText.jsonPath($_type + ".browsers").get();
            Map<String, Object> map = new Json(browsers).toObject(Map.class);
            synchronized (userAgentLock) {
                for (Map.Entry<String, Object> entry : map.entrySet()) {
                    List<String> agents = (List<String>) entry.getValue();
                    userAgents.addAll(agents);
                }
            }
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
    }

    /**
     * Rebuilds the cookie pool from the userOauthInfos support record, keeping only
     * cookies whose oauth category matches this site. Deduplicated via a set, and
     * synchronized so the round-robin getter never observes a concurrent mutation.
     */
    private void initCookieMap(CrawlerRequestRecord supportSourceRecord) {
        Set<String> cookieSets = new HashSet<>();
        synchronized (cookieLock) {
            try {
                HttpPage httpPage = supportSourceRecord.getInternalDownloadPage();
                Json rawText = new Json(httpPage.getRawText());
                String status = rawText.jsonPath($_type + ".status").get();
                List<String> contents = rawText.jsonPath($_type + ".content").all();

                if ("0".equals(status) && contents != null && contents.size() > 0) {

                    cookies.clear(); // start a fresh pool

                    List<Map> userOauthInfos = new Json(contents.get(0)).toList(Map.class);
                    for (Map userOauthInfo : userOauthInfos) {
                        Json oauthInfoJson = new Json(String.valueOf(userOauthInfo.get("oauthInfo")));
                        String category = oauthInfoJson.jsonPath($_type + ".category").get();
                        String cookie = oauthInfoJson.jsonPath($_type + ".cookie").get();
                        if (SITE.equals(category)) {
                            cookieSets.add(cookie);
                        }
                    }
                }
            } catch (Exception e) {
                log.error(e.getMessage(), e);
            }
            cookies.addAll(cookieSets);
        }
    }

    /**
     * Returns the next cookie in round-robin order, or null when the pool is empty.
     */
    private String getOneCookie() {
        synchronized (cookieLock) {
            if (cookies.isEmpty()) {
                return null;
            }
            if (cookieIndex.get() >= cookies.size()) {
                cookieIndex.set(0);
            }
            return cookies.get(cookieIndex.getAndIncrement());
        }
    }

    /**
     * Returns the next user agent in round-robin order, or null when the pool is empty.
     */
    private String getOneUserAgent() {
        synchronized (userAgentLock) {
            if (userAgents.isEmpty()) {
                return null;
            }
            if (userAgentIndex.get() >= userAgents.size()) {
                userAgentIndex.set(0);
            }
            return userAgents.get(userAgentIndex.getAndIncrement());
        }
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // No post-processing required for this script.
    }

    /**
     * Re-queues a failed request for another download attempt, preserving its extras,
     * headers, wash/parse flags, and business/result tags. Gives up after
     * {@link #MAX_RETRY_COUNT} attempts, tracked via the retry-count business tag.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= MAX_RETRY_COUNT) {
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        // Re-create the record with the same request type (turn-page vs item-page) as the original.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        if (crawlerRequestRecord == null) {
            return;
        }

        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Converts a Weibo display timestamp into epoch milliseconds. Handles the formats
     * Weibo renders: absolute dates with/without a year ("[yyyy年]MM月dd日 HH:mm"),
     * relative markers ("今天"/"昨天" + HH:mm, "刚刚", "N秒前", "N分钟前"),
     * plain "yyyy-MM-dd HH:mm", and comment-floor strings containing "第N楼".
     * Returns 0 for blank input and 1 for an unrecognized format (sentinel values
     * preserved from the original contract).
     *
     * @param timeStr the raw timestamp text, already trimmed by callers
     * @return epoch milliseconds
     * @throws ParseException when a recognized pattern fails to parse
     */
    private static Long articleReleaseTime(String timeStr) throws ParseException {
        if (StringUtils.isBlank(timeStr)) {
            return 0L;
        }

        LocalDate now = LocalDate.now(); // today (yyyy-MM-dd)
        if (timeStr.contains("日") && timeStr.contains("月")) {
            // Dates within the current year omit the year component.
            String time = timeStr.contains("年") ? timeStr : now.getYear() + "年" + timeStr;
            return DateUtils.parseDate(time, "yyyy年MM月dd日 HH:mm").getTime();
        }

        if (timeStr.startsWith("今天")) {
            String time = timeStr.replace("今天", now.toString());
            return DateUtils.parseDate(time, "yyyy-MM-ddHH:mm").getTime();
        }

        if (timeStr.startsWith("昨天")) {
            String yesterday = now.minus(1, ChronoUnit.DAYS).toString();
            String time = timeStr.replace("昨天", yesterday);
            return DateUtils.parseDate(time, "yyyy-MM-ddHH:mm").getTime();
        }

        if (timeStr.endsWith("刚刚")) {
            return System.currentTimeMillis();
        }

        // Relative offsets, e.g. "7秒前" (7 seconds ago), "59分钟前" (59 minutes ago).
        if (timeStr.endsWith("秒前")) {
            int number = Integer.valueOf(timeStr.substring(0, timeStr.indexOf("秒前")));
            return (System.currentTimeMillis() - number * MILLIS_PER_SECOND);
        }

        if (timeStr.endsWith("分钟前")) {
            int number = Integer.valueOf(timeStr.substring(0, timeStr.indexOf("分钟前")));
            return (System.currentTimeMillis() - number * MILLIS_PER_MINUTE);
        }

        if (timeStr.matches("\\d{4}-\\d+-\\d+ \\d+:\\d+")) {
            return DateUtils.parseDate(timeStr, "yyyy-MM-dd HH:mm").getTime();
        }

        // Comment floor markers, e.g. "第3楼 2020-12-28 13:50".
        if (timeStr.contains("楼")) {
            timeStr = timeStr.replaceAll("第\\d+楼", "");
            return DateUtils.parseDate(timeStr.trim(), "yyyy-MM-dd HH:mm").getTime();
        }
        return 1L;
    }
}
