package com.chance.cc.crawler.development.scripts.zgjkjw;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpConfig;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.turnPage;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.turnPageItem;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @author lt
 * @version 1.0
 * @date 2021-06-25 10:13:35
 * @email okprog@sina.com
 */
public class ZGJKWCrawlerScript extends CrawlerCommonScript {

    // SLF4J convention: one shared, immutable logger per class.
    private static final Logger logger = LoggerFactory.getLogger(ZGJKWCrawlerScript.class);

    public static final String domain = "zgjkjw";
    public static final String scriptSite = "news";

    public static final String listUrlRegex = "https://www\\.zgjkjw\\.cn/news/list_\\d*_\\d*\\.html";
    public static final String articleUrlRegex = "https://www\\.zgjkjw\\.cn/news/\\d*\\.html";
    public static final String listUrlFormat = "https://www.zgjkjw.cn/news/list_1_%s.html";
    public static final String articleUrlFormat = "https://www.zgjkjw.cn/news/%s.html";

    // Biz-tag key and upper bound used by the download-retry bookkeeping
    // in addCrawlerRecords(). Values are unchanged from the original code.
    private static final String RETRY_COUNT_TAG = "download_retry_count";
    private static final int MAX_RETRY_COUNT = 100;

    /**
     * Enables circular redirects on the request's HTTP config before delegating
     * to the common pipeline. NOTE(review): presumably the site redirects back
     * onto already-visited URLs — confirm against production logs.
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        HttpConfig httpConfig = requestRecord.getHttpConfig();
        httpConfig.setCircularRedirectsAllowed(true);
        return super.prepareRequest(requestRecord, supportSourceRecords);
    }

    /**
     * Link-extraction entry point. Failed or empty downloads are re-queued via
     * {@link #addCrawlerRecords}; list pages are parsed for next-page and
     * article links.
     *
     * <p>NOTE(review): returns {@code null} (not an empty list) for URLs that
     * match neither pattern — kept as-is because framework callers may
     * distinguish "nothing parsed" from "not a parseable page".
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        if (doHttpPageCheck(crawlerRequestRecord, httpPage)) {
            // Download failed: schedule a retry record and skip washing this page.
            addCrawlerRecords(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        String lastRequestUrl = lastRequest.getUrl();
        if (lastRequestUrl.matches(listUrlRegex)) {
            return parseListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return null;
    }

    /**
     * Parses one list page: emits a turn-page record when a "next page" link
     * exists, plus one item record per article entry on the page.
     *
     * @param crawlerRequestRecord the record whose page was downloaded
     * @param httpPage             downloaded list page
     * @param parsedLinks          accumulator the new records are added to
     * @return {@code parsedLinks}, for caller convenience
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = httpRequest.getUrl();
        Html html = httpPage.getHtml();
        // Current channel name, carried along as the "path" biz tag.
        String listTag = html.xpath("//li[@class=\"now\"]/a/text()").get();
        List<String> path = new ArrayList<>();
        path.add(listTag);
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(Field_Path, path);
        List<String> allHrefs = html.xpath("//ul[@class=\"pagination\"]/a/text()").all();
        if (hasNextPage(allHrefs)) {
            // Page index is the second "_"-separated token: .../list_1_5.html -> 5.
            int lastPage = Integer.parseInt(lastRequestUrl.split("_")[2].split("\\.")[0]);
            String nextUrl = String.format(listUrlFormat, (lastPage + 1));
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .recordKey(nextUrl)
                    .httpHeads(httpRequest.getHeaders())
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(nextPageRecord);
        }
        List<Selectable> itemNodes = html.xpath("//div[@class=\"base_list_content\"]/div[@class=\"item\"]").nodes();
        for (Selectable itemNode : itemNodes) {
            String articleUrl = itemNode.xpath("./div[@class=\"p\"]/a/@href").get();

            String timeStr = itemNode.xpath("./div[@class=\"r\"]/div[@class=\"c\"]/text()").get();
            try {
                // Assumes the node text is "<label> yyyy-MM-dd"; the date is the
                // second space-separated token — TODO confirm against live markup.
                String time = timeStr.trim().split(" ")[1];
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(articleUrl)
                        .recordKey(articleUrl)
                        .httpHeads(httpRequest.getHeaders())
                        .releaseTime(DateUtils.parseDate(time, "yyyy-MM-dd").getTime())
                        .copyBizTags()
                        .needParsed(false)
                        .needWashed(true)
                        .resultLabelTag(article)
                        .build();
                parsedLinks.add(itemRecord);
            } catch (Exception e) {
                // Pass the throwable so the stack trace is kept (was getMessage() only),
                // matching the error-logging style used in washArticle().
                logger.error(e.getMessage(), e);
            }

        }
        return parsedLinks;
    }

    /**
     * @param nextHrefs text of every pagination link on the list page
     * @return {@code true} when any link text equals "下一页" (next page)
     */
    private boolean hasNextPage(List<String> nextHrefs) {
        for (String nextHref : nextHrefs) {
            if ("下一页".equals(nextHref.trim())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Washes one downloaded page into {@link CrawlerData} results. Only pages
     * tagged with the {@code article} data type are processed.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        // Numeric article id, e.g. https://www.zgjkjw.cn/news/1234.html -> "1234".
        String articleKey = httpRequestUrl.substring(httpRequestUrl.lastIndexOf("/") + 1).split("\\.")[0];
        String rawText = httpPage.getRawText();
        if (StringUtils.isBlank(rawText)) {
            logger.error("httpPage is empty !");
            return crawlerDataList;
        }
        String site = crawlerRequestRecord.tagsCreator().bizTags().site();
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            washArticle(crawlerRequestRecord, httpPage, crawlerDataList, site, articleKey);
        }
        return crawlerDataList;
    }

    /**
     * Extracts title, publish time, source and body text from an article page
     * and appends the resulting {@link CrawlerData} to {@code crawlerDataList}.
     * A publish time that fails to parse is logged and the article is skipped.
     */
    private void washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerData> crawlerDataList, String site, String articleKey) {
        Html html = httpPage.getHtml();
        String title = html.xpath("//h1[@class=\"show_title\"]/text()").get();
        String pubTime = html.xpath("//div[@class=\"show_time_m\"]/text()").get();
        String source = html.xpath("//div[@class=\"show_topview\"]/a/text()").get();
        List<String> allContent = html.xpath("//div[@class=\"show_content\"]//p//text()").all();
        // Single-threaded concatenation: StringBuilder, not the synchronized StringBuffer.
        StringBuilder sbContent = new StringBuilder();
        for (String string : allContent) {
            sbContent.append(string.trim());
        }
        try {
            CrawlerData crawlerAData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(String.format(articleUrlFormat, articleKey))
                    .dataId(StringUtils.joinWith("-", domain(), site, article.enumVal(), articleKey))
                    .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd HH:mm").getTime())
                    .addContentKV(Field_Title, unescapeHtml2J(title))
                    .addContentKV(Field_Source, source)
                    .addContentKV(Field_Content, unescapeHtml2J(sbContent.toString()))
                    .resultLabelTag(article)
                    .build();
            crawlerDataList.add(crawlerAData);
        } catch (ParseException e) {
            logger.error(e.getMessage(), e);
        }
    }

    /** Registers the list- and article-URL patterns this script can handle. */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(listUrlRegex);
        addUrlRegular(articleUrlRegex);
    }

    /**
     * @return {@code true} when the record's site tag matches this script's site.
     *         Constant-first comparison so a missing site tag yields
     *         {@code false} instead of an NPE.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String crawlerSite = crawlerRequestRecord.tagsCreator().bizTags().site();
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /** No post-execution work is needed for this script. */
    @Override
    public void afterExecute(CrawlerRecordContext context) {

    }

    @Override
    public String domain() {
        return domain;
    }


    /**
     * Re-queues a failed download as a fresh request, tracking the attempt
     * count in the {@value #RETRY_COUNT_TAG} biz tag; the record is dropped
     * (with an error log) once {@link #MAX_RETRY_COUNT} is reached.
     *
     * <p>NOTE(review): a record without the tag starts at 1 and is stored as 2
     * on its first retry — the stored value is "attempts so far + 1"; kept
     * as-is to preserve the original counting behavior.
     */
    private void addCrawlerRecords(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {

        int count = 1;
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        if (crawlerBusinessTags.hasKVTag(RETRY_COUNT_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(RETRY_COUNT_TAG).getVal();
            if (count >= MAX_RETRY_COUNT) {
                logger.error("{} download has number of retries exceeds the limit" +
                        ",request url {}", domain, crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        count++;
        crawlerBusinessTags.addCustomKV(RETRY_COUNT_TAG, count);

        // Re-issue the same HTTP request; notFilterRecord() keeps the duplicate
        // URL from being filtered out by the dedup stage.
        CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRecord)
                .httpRequest(crawlerRecord.getHttpRequest())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .notFilterRecord()
                .build();
        crawlerRequestRecords.add(crawlerRequestRecord);

        // Preserve the original request type: builder defaulted it to turnPage,
        // so flip it back to turnPageItem when the failed record was an item.
        if (crawlerRecord.tagsCreator().requestTags().hasRequestType(turnPageItem)) {
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(turnPage);
            crawlerRequestRecord.tagsCreator().requestTags().addRequestType(turnPageItem);
        }
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
    }

    /**
     * Checks whether the page download succeeded and is complete.
     *
     * @param crawlerRequestRecord last record
     * @param httpPage             page
     * @return {@code true} when the download FAILED (non-200 status, download
     *         error, or blank body) and the record should be retried
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        int statusCode = httpPage.getStatusCode();
        if (statusCode != 200) {
            logger.error("download page {} error, status code is {}", lastRequestUrl, statusCode);
            return true;
        }
        if (!httpPage.isDownloadSuccess()) {
            logger.error("download page failed, check your link {}", lastRequestUrl);
            return true;
        }
        if (StringUtils.isBlank(httpPage.getRawText())) {
            logger.error("download page empty, check your link {}", lastRequestUrl);
            return true;
        }
        return false;
    }

    /**
     * Repeatedly HTML-unescapes {@code str} to resolve double-encoded entities
     * (e.g. {@code &amp;amp;} → {@code &amp;} → {@code &}), up to 6 passes.
     * Stops early when a pass changes nothing, so strings that merely contain
     * literal '&amp;' and ';' characters no longer burn all 6 iterations.
     *
     * @param str possibly entity-encoded text; may be {@code null}
     * @return fully unescaped text, or {@code null} when {@code str} is null
     */
    public static String unescapeHtml2J(String str) {
        if (str == null) {
            return null;
        }
        int times = 0;
        while (str.contains("&") && str.contains(";")) {
            String unescaped = StringEscapeUtils.unescapeHtml(str);
            if (unescaped.equals(str)) {
                // No entity resolved — the '&'/';' are literal; stop looping.
                break;
            }
            str = unescaped;
            times++;
            if (times > 5) {
                break;
            }
        }
        return str;
    }
}
