package com.chance.cc.crawler.development.scripts.sina.searchKw;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.ParseException;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/6/24 10:25
 * @Description 新浪关键词查询
 **/
public class SinaSearchKwCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(SinaSearchKwCrawlerScript.class);
    private static final String DOMAIN = "sina";
    private static final String SITE = "searchKw";
    private static final String CJ_SITE = "cj_searchKw";
    private static final String REQUEST_AGAIN_TAG = "sina_request_retry";
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";

    /** Maximum number of re-download attempts before a record is dropped. */
    private static final int MAX_RETRY_COUNT = 5;

    private static final String ENTRANCE_URL = "https://search.sina.com.cn/";
    private static final String SEARCH_URL = "https://search.sina.com.cn/\\S*page=\\S*";
    private static final String CJ_SEARCH_URL = "https://cre.dp.sina.cn/webes/simba/s2\\S*";

    /**
     * Domain this script handles.
     *
     * @return the constant domain key {@code "sina"}
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers the URL patterns that route requests into this script:
     * the search entrance, the paged HTML search results, and the cj JSON search API.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(ENTRANCE_URL);
        addUrlRegular(SEARCH_URL);
        addUrlRegular(CJ_SEARCH_URL);
    }

    /**
     * Input gate: only records whose "site" business tag is {@code searchKw}
     * or {@code cj_searchKw} are processed by this script.
     *
     * @param crawlerRequestRecord the incoming record to check
     * @return true when the record belongs to this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        // Constant-first equals: a missing "site" tag no longer throws NPE.
        return SITE.equals(site) || CJ_SITE.equals(site);
    }

    /**
     * Builds the initial search requests from the keyword support source
     * (any support record whose URL contains "keys" supplies the keyword list).
     *
     * @param requestRecord        the seed request carrying the search-URL template in its extras
     * @param supportSourceRecords support records with pre-downloaded keyword pages
     * @return one turn-page request per keyword
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> crawlerRecords = new ArrayList<>();

        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (url.contains("keys")) {
                initKeyword(requestRecord, supportSourceRecord, crawlerRecords);
            }
        }
        return crawlerRecords;
    }

    /**
     * Parses a downloaded page into follow-up requests (next page + item detail pages).
     * Failed downloads are retried via {@link #requestAgainCrawlerRecord} unless the
     * status is 404 (treated as a permanently empty page).
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded page
     * @return the follow-up requests extracted from the page
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        // Only proceed on a successful download with HTTP 200.
        if (!httpPage.isDownloadSuccess() || (httpPage.getStatusCode() != 200)) {
            log.error("{} status code : {}", crawlerRequestRecord.getHttpRequest().getUrl(), httpPage.getStatusCode());
            if (httpPage.getStatusCode() != 404) {  // 404 means an empty page: do not retry
                requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            }
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        if (requestUrl.matches(SEARCH_URL)) {
            searchUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        if (requestUrl.matches(CJ_SEARCH_URL)) {
            cjSearchUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        return parsedLinks;
    }

    /**
     * Parses an HTML search-result page: queues the "next page" link (if present)
     * and one item request per result entry that has both a URL and a parseable
     * release time.
     */
    private void searchUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String rawText = httpPage.getRawText();
        String keyword = (String) httpPage.getRequest().getExtras().get("keyword");
        String result = washContent("找到相关新闻\\d+篇", rawText);
        if ("找到相关新闻0篇".equals(result)) {
            log.error("{} search result is 0!", keyword);
            return;
        }
        String requestUrl = httpPage.getRequest().getUrl();

        // Next-page link: the anchor labeled "下一页" holds a relative query string.
        String aText = washContent("<a[\\S\\ ]*>下一页</a>", rawText);
        if (StringUtils.isNotBlank(aText)) {
            String nextUrl = new Html(aText).xpath("//a/@href").get();
            nextUrl = requestUrl.split("\\?")[0] + StringEscapeUtils.unescapeHtml(nextUrl);
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            record.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
            parsedLinks.add(record);
        }

        // Item detail pages live between the two landmark fragments below.
        // Guard the indexOf results: an unexpected layout previously threw
        // StringIndexOutOfBoundsException instead of logging.
        int start = rawText.indexOf("<div id=\"nav_result_container\"></div>");
        int end = rawText.indexOf("<table cellspacing=\"0\" style=\"margin:0 auto;\">");
        if (start < 0 || end < start) {
            log.error("url [{}] itemList can not get!", requestUrl);
            return;
        }
        String allItemDatas = rawText.substring(start, end);
        if (StringUtils.isBlank(allItemDatas)) {
            log.error("url [{}] itemList can not get!", requestUrl);
            return;
        }
        List<Selectable> nodes = new Html(allItemDatas).xpath("//h2").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }
            itemUrl = StringEscapeUtils.unescapeHtml(itemUrl);

            String releaseTime = node.xpath("./span/text()").get();
            releaseTime = StringUtils.isBlank(releaseTime) ? "" : washContent("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", releaseTime);
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }

            try {
                long releaseTimeToLong = washTimeToLong(releaseTime);
                // recordKey includes the keyword so the same article found under
                // different keywords is treated as distinct records.
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl + keyword)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                log.error("release time [{}] parse failed: {}", releaseTime, e.getMessage());
            }
        }
    }

    /**
     * Parses a cj (finance) JSON search response: retries on a non-ok status,
     * queues the next page and one item request per doc in the result list.
     */
    private void cjSearchUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String msg = httpPage.getJson().jsonPath($_type + ".status.msg").get();
        // !"ok".equals(msg) already covers null/blank msg.
        if (!"ok".equals(msg)) {
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        String total = httpPage.getJson().jsonPath($_type + ".total").get();
        String keyword = (String) httpPage.getRequest().getExtras().get("keyword");
        if ("0".equals(total)) {
            log.error("{} search result is 0!", keyword);
            return;
        }

        // Next page: bump the "page=" query value by one.
        // NOTE(review): this assumes "page" is the last query parameter — the
        // parse throws NumberFormatException otherwise; matches current URL shape.
        String[] split = requestUrl.split("page=");
        String nextUrl = split[0] + "page=" + (Integer.parseInt(split[1]) + 1);
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                .copyBizTags()
                .copyResultTags()
                .build();
        record.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        // FIX: the next-page record was built but never queued, so cj pagination
        // silently stopped after page one (the HTML branch adds its equivalent).
        parsedLinks.add(record);

        List<String> all = httpPage.getJson().jsonPath($_type + ".docs").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String itemUrl = jsonObject.getString("url");
            String releaseTime = jsonObject.getString("ctime");
            if (StringUtils.isBlank(itemUrl) || StringUtils.isBlank(releaseTime)) {
                continue;
            }

            // ctime is a Unix timestamp in seconds; append "000" to get milliseconds.
            long releaseTimeToLong = Long.parseLong(releaseTime + "000");
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(itemUrl)
                    .recordKey(itemUrl + keyword)
                    .releaseTime(releaseTimeToLong)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            parsedLinks.add(itemRecord);
        }
    }

    /**
     * Expands the downloaded keyword list into one search request per keyword,
     * substituting the URL-encoded keyword into the {@code %s} placeholder of the
     * source-URL template stored in the seed record's extras.
     *
     * @param requestRecord       the seed request carrying the URL template
     * @param supportSourceRecord the support record with the internally downloaded keyword JSON
     * @param crawlerRecords      output list the generated requests are appended to
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        if (extras == null) {
            return;
        }
        String searchKwSourceUrl = (String) extras.get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchKwSourceUrl)) {
            log.error("searchKw source url can not is null!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");

            try {
                String itemUrl = searchKwSourceUrl.replace("%s", URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(requestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .httpHeads(requestRecord.getHttpRequest().getHeaders())
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                turnRecord.tagsCreator().bizTags().addKeywords(keyword);
                turnRecord.getHttpRequest().addExtra("keyword", keyword);
                crawlerRecords.add(turnRecord);
            } catch (UnsupportedEncodingException e) {
                // UTF-8 is guaranteed by the JVM spec; this branch is unreachable in practice.
                log.error("keyword [{}] encode failed: {}", keyword, e.getMessage());
            }
        }
    }

    /**
     * This script produces links only; page washing is handled elsewhere.
     *
     * @return always null
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        return null;
    }

    /** No post-processing needed for this script. */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-queues a failed request for another download attempt, up to
     * {@value #MAX_RETRY_COUNT} times (tracked via the retry business tag).
     * The retry keeps the original headers, extras, and wash/parse flags; the
     * record key is suffixed with the retry count so the dedup filter does not
     * drop it.
     *
     * @param crawlerRequestRecords output list the retry record is appended to
     * @param crawlerRecord         the failed record to retry
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= MAX_RETRY_COUNT) {
                log.error("sina download page the number of retries exceeds the limit" +
                        ",request url {},detail is [{}]", crawlerRecord.getHttpRequest().getUrl(), JSONObject.toJSONString(crawlerRecord));
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        CrawlerRequestRecord crawlerRequestRecord;
        // Preserve the request type: item requests stay item requests, everything
        // else is retried as a turn-page request (which additionally skips the filter).
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Returns the first match of the given regex in the input, or null when the
     * input is null or no match is found.
     *
     * @param regx  the regular expression to search with
     * @param input the text to search (may be null)
     * @return the first matched substring, or null
     */
    private static String washContent(String regx, String input) {
        // Null-safe: raw page text can be absent on malformed downloads.
        if (input == null) {
            return null;
        }
        Matcher matcher = Pattern.compile(regx).matcher(input);
        // Only the first match is wanted ("while" in the original was misleading).
        if (matcher.find()) {
            return matcher.group(0);
        }
        return null;
    }

    /**
     * Converts one of the site's display time formats into epoch milliseconds.
     * Month/day-only formats (e.g. "06月24日") are prefixed with the current year.
     *
     * @param time the display time string (blank returns 0)
     * @return epoch milliseconds
     * @throws ParseException when none of the known patterns match
     */
    private static long washTimeToLong(String time) throws ParseException {
        if (StringUtils.isBlank(time)) {
            return 0;
        }

        time = time.trim();
        if (time.matches("\\d{2}月\\d{2}日") || time.matches("\\d{2}月\\d{2}日 \\d{2}:\\d{2}")) {
            time = LocalDate.now().getYear() + time;
        }
        return DateUtils.parseDate(time, "yyyy-MM-dd HH:mm:ss", "发表于 yyyy/MM/dd HH:mm", "yyyyMM月dd日", "yyyy-MM-dd HH:mm", "yyyyMM月dd日 HH:mm", "日期：yyyy-MM-dd", "yyyy年MM月dd日 HH:mm").getTime();
    }

    // TODO(review): leftover debug entry point — unrelated to this script's logic;
    // safe to delete once confirmed nothing invokes it.
    public static void main(String[] args) {
        String s = "https://a.sina.cn/t/author/19656553/";

        String substring = s.substring(s.substring(0, s.length() - 1).lastIndexOf("/") + 1);
        System.out.println(substring);


    }
}
