package com.chance.cc.crawler.development.scripts.yiyaoxw;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpConstant;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import org.apache.commons.lang.CharEncoding;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URLEncoder;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.turnPage;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.turnPageItem;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.Field_I_Collection;

/**
 * @author lt
 * @version 1.0
 * @date 2021-06-25 13:16:21
 * @email okprog@sina.com
 */
public class YiYaoZXCrawlerScript extends CrawlerCommonScript {

    /** Loggers are stateless and shared: make them static final (SLF4J convention). */
    private static final Logger logger = LoggerFactory.getLogger(YiYaoZXCrawlerScript.class);

    public static final String domain = "yiyaoxw";
    public static final String scriptSite = "news";

    /** Matches the internal meta endpoint that supplies search keywords for this domain. */
    public static final String keysRegex = "https?://\\S*v1/meta/" + domain + "/keys\\S*";

    public static final String listUrlRegex = "https://yiyaoxw\\.com/category/yimei/page/\\d*";
    public static final String searchListRegex = "https://yiyaoxw\\.com/page/\\S*";
    public static final String articleUrlRegex = "https://yiyaoxw\\.com/\\d{6}/\\d{2}/\\d*\\.html";

    // NOTE: was "%S" (uppercase-string conversion); "%s" is the correct specifier and
    // produces identical output for the integer page numbers passed in.
    public static final String listUrlFormat = "https://yiyaoxw.com/category/yimei/page/%s";
    public static final String searchListFormat = "https://yiyaoxw.com/page/%s?s=";
    public static final String articleUrlFormat = "http://web.yishengzhan.cn/#/articleDetails?articleId=%s&source=7";

    /** Business-tag key tracking how many times a download has been retried. */
    private static final String RETRY_COUNT_KEY = "download_retry_count";
    /** Give up re-queuing a record once the retry tag reaches this value. */
    private static final int MAX_RETRY_COUNT = 100;

    /**
     * Builds the initial request records for a crawl round.
     * <p>
     * When keyword meta records are supplied, each keyword is expanded into a
     * search-list request via {@link #initKeywordsRecord}; otherwise the parent
     * script's default preparation is used.
     *
     * @param requestRecord        the seed request record
     * @param supportSourceRecords auxiliary records (keyword meta downloads); may be null/empty
     * @return the generated request records (possibly empty, never null)
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        if (null == supportSourceRecords || supportSourceRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        List<CrawlerRecord> allItemRecords = new ArrayList<>();
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String supportUrl = supportSourceRecord.getHttpRequest().getUrl();
            if (supportUrl.matches(keysRegex)) {
                initKeywordsRecord(allItemRecords, requestRecord, supportSourceRecord);
            }
        }
        return allItemRecords;
    }

    /**
     * Dispatches a downloaded page to the matching link parser.
     * <p>
     * Pages that fail {@link #doHttpPageCheck} are re-queued as retry records and
     * skipped for washing. URLs that match neither list pattern yield an empty list.
     *
     * @param crawlerRequestRecord the record whose page was downloaded
     * @param httpPage             the downloaded page
     * @return follow-up request records; never null (was null for unmatched URLs)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        if (doHttpPageCheck(crawlerRequestRecord, httpPage)) {
            // Download failed or incomplete: schedule a retry and do not wash this page.
            addCrawlerRecords(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        String lastRequestUrl = lastRequest.getUrl();
        if (lastRequestUrl.matches(listUrlRegex)) {
            return parseListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(searchListRegex)) {
            return parseSearchLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        // Unrecognized URL: return the empty list rather than null so callers need no null check.
        return parsedLinks;
    }

    /**
     * Parses a keyword-search result page: emits the next search page plus one
     * item record per article found.
     *
     * @return the accumulated {@code parsedLinks}
     */
    private List<CrawlerRequestRecord> parseSearchLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Html html = httpPage.getHtml();
        List<Selectable> itemNodes = html.xpath("//div[@class=\"content\"]/article").nodes();
        // An empty result page means we ran past the last page: stop turning.
        if (null != itemNodes && !itemNodes.isEmpty()) {
            // URL shape: https://yiyaoxw.com/page/<page>?s=<encoded keyword>
            int page = Integer.parseInt(httpRequestUrl.substring(httpRequestUrl.lastIndexOf("/") + 1).split("\\?")[0]);
            // substring(indexOf('=') + 1) tolerates an empty keyword, unlike split("=")[1]
            // which threw ArrayIndexOutOfBoundsException when nothing followed "s=".
            String encodeKeyword = httpRequestUrl.substring(httpRequestUrl.indexOf('=') + 1);
            String nextUrl = String.format(searchListFormat, (page + 1)) + encodeKeyword;
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .recordKey(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(nextPageRecord);
            genItemRecords(crawlerRequestRecord, itemNodes, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Parses a category list page: records the breadcrumb path as a business tag,
     * emits the next list page plus one item record per article found.
     *
     * @return the accumulated {@code parsedLinks}
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Html html = httpPage.getHtml();
        // Category title, propagated to children via copyBizTags().
        List<String> path = html.xpath("//div[@class=\"catleader\"]/h1/text()").all();
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(Field_Path, path);
        List<Selectable> itemNodes = html.xpath("//div[@class=\"content\"]/article").nodes();
        // An empty result page means we ran past the last page: stop turning.
        if (null != itemNodes && !itemNodes.isEmpty()) {
            int page = Integer.parseInt(httpRequestUrl.substring(httpRequestUrl.lastIndexOf("/") + 1));
            String nextUrl = String.format(listUrlFormat, (page + 1));
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .recordKey(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(nextPageRecord);
            genItemRecords(crawlerRequestRecord, itemNodes, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Converts each article node of a list/search page into an item request record
     * tagged for article + interaction washing. A node that fails to parse (e.g.
     * unparseable publish date) is logged and skipped; the rest still go through.
     */
    private void genItemRecords(CrawlerRequestRecord crawlerRequestRecord, List<Selectable> itemNodes, List<CrawlerRequestRecord> parsedLinks) {
        for (Selectable itemNode : itemNodes) {
            String articleUrl = itemNode.xpath("./a[@class=\"focus\"]/@href").get();
            String pubTime = itemNode.xpath("./p[@class=\"meta\"]/time/text()").get();
            String author = itemNode.xpath("./p[@class=\"meta\"]/span[@class=\"author\"]/text()").get();
            try {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(articleUrl)
                        .recordKey(articleUrl)
                        .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd").getTime())
                        .copyBizTags()
                        .needParsed(false)
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build();
                // Author is only available on the list page, so stash it for washArticle.
                itemRecord.tagsCreator().bizTags().addCustomKV("author", author);
                parsedLinks.add(itemRecord);
            } catch (Exception e) {
                // Keep the stack trace; logging only getMessage() hid the failure site.
                logger.error("generate item record failed for url {}", articleUrl, e);
            }
        }
    }

    /**
     * Washes a downloaded article page into {@link CrawlerData} results.
     * <p>
     * The article key is the numeric id taken from the URL's last path segment
     * (e.g. {@code .../202106/25/12345.html} -> {@code 12345}).
     *
     * @return the washed data items; empty if the page body is blank or untagged
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        String articleKey = httpRequestUrl.substring(httpRequestUrl.lastIndexOf("/") + 1).split("\\.")[0];
        String rawText = httpPage.getRawText();
        if (StringUtils.isBlank(rawText)) {
            logger.error("httpPage is empty !");
            return crawlerDataList;
        }

        String site = crawlerRequestRecord.tagsCreator().bizTags().site();
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            washArticle(crawlerRequestRecord, httpPage, crawlerDataList, site, articleKey);
        }
        return crawlerDataList;
    }

    /**
     * Extracts the article body (title, author, paragraphs, image urls) and, when
     * the record is also tagged {@code interaction}, the view/like counters, and
     * appends the resulting {@link CrawlerData} items to {@code crawlerDataList}.
     */
    private void washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerData> crawlerDataList, String site, String articleKey) {
        Html html = httpPage.getHtml();
        String title = html.xpath("//h1[@class=\"article-title\"]/a/text()").get();
        // Author was captured from the list page (see genItemRecords).
        String author = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("author");
        List<String> allContents = html.xpath("//article//p//text()").all();
        StringBuilder sbContent = new StringBuilder();
        for (String allContent : allContents) {
            sbContent.append(allContent.trim());
        }
        List<String> allImages = html.xpath("//article//p//img/@src").all();
        StringBuilder sbImage = new StringBuilder();
        for (String allImage : allImages) {
            // "\\x01" is the downstream multi-value separator; keep it byte-identical.
            sbImage.append(allImage).append("\\x01");
        }
        // Views render as e.g. "阅读(123)"; guard against a missing node or missing
        // parentheses, which previously threw NPE / ArrayIndexOutOfBoundsException.
        String views = html.xpath("//span[@class=\"item post-views\"]/text()").get();
        if (null != views && views.indexOf('(') >= 0) {
            views = views.substring(views.indexOf('(') + 1).replace(")", "");
        }
        String likes = html.xpath("//div[@class=\"post-actions\"]/a/span/text()").get();
        CrawlerData crawlerAData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .url(String.format(articleUrlFormat, articleKey))
                .dataId(StringUtils.joinWith("-", domain(), site, article.enumVal(), articleKey))
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_Title, title)
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Content, sbContent.toString())
                .addContentKV(Field_Images, sbImage.toString())
                .resultLabelTag(article)
                .build();
        crawlerDataList.add(crawlerAData);
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            CrawlerData crawlerInteractionData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(String.format(articleUrlFormat, articleKey))
                    .dataId(StringUtils.joinWith("-", domain(), site, interaction.enumVal(), articleKey))
                    // Interaction data is linked back to its article via parentId.
                    .parentId(StringUtils.joinWith("-", domain(), site, article.enumVal(), articleKey))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Views, views)
                    .resultLabelTag(interaction)
                    .build();
            crawlerDataList.add(crawlerInteractionData);
        }
    }

    /**
     * Expands the keyword meta payload ({@code {"status":0,"content":[{"keyword":...}]}})
     * into first-page search requests, one per keyword. Failures are logged with the
     * full stack trace and the batch is abandoned (best-effort, matching original behaviour).
     */
    private void initKeywordsRecord(List<CrawlerRecord> allItemRecords, CrawlerRequestRecord requestRecord, CrawlerRequestRecord keywordRecord) {
        try {
            JSONObject jsonObject = JSONObject.parseObject(keywordRecord.getInternalDownloadPage().getRawText());
            if (jsonObject.getIntValue("status") == 0) {
                JSONArray objects = jsonObject.getJSONArray("content");
                for (Object object : objects) {
                    String keyword = ((JSONObject) object).getString("keyword");
                    // Explicit UTF-8: the no-charset overload is deprecated and uses the
                    // platform default encoding.
                    String listUrl = String.format(searchListFormat, 1) + URLEncoder.encode(keyword, CharEncoding.UTF_8);
                    CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(requestRecord)
                            .httpUrl(listUrl)
                            .recordKey(listUrl)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .copyBizTags()
                            .build();
                    listRecord.tagsCreator().bizTags().addKeywords(keyword);
                    allItemRecords.add(listRecord);
                }
            }
        } catch (Exception e) {
            logger.error("from keywords init urls failed", e);
        }
    }

    /** Registers the URL patterns this script handles. */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(listUrlRegex);
        addUrlRegular(searchListRegex);
        addUrlRegular(articleUrlRegex);
    }

    /**
     * Accepts only records tagged with this script's site. Constant-first
     * comparison avoids an NPE when the record carries no site tag.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String crawlerSite = crawlerRequestRecord.tagsCreator().bizTags().site();
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /** No post-processing needed for this script. */
    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // intentionally empty
    }

    /** @return the crawler domain identifier for this script */
    @Override
    public String domain() {
        return domain;
    }

    /**
     * Re-queues a failed download as a retry record, carrying a retry counter in
     * the business tags. Gives up (logs and adds nothing) once the counter reaches
     * {@value #MAX_RETRY_COUNT}. The retry keeps the original request, parse/wash
     * flags, and turn-page-item request type when present.
     */
    private void addCrawlerRecords(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {

        int count = 1;
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        if (crawlerBusinessTags.hasKVTag(RETRY_COUNT_KEY)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(RETRY_COUNT_KEY).getVal();
            if (count >= MAX_RETRY_COUNT) {
                logger.error("{} download has number of retries exceeds the limit" +
                        ",request url {}", domain, crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        count++;
        crawlerBusinessTags.addCustomKV(RETRY_COUNT_KEY, count);

        CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRecord)
                .httpRequest(crawlerRecord.getHttpRequest())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .notFilterRecord()
                .build();
        crawlerRequestRecords.add(crawlerRequestRecord);

        // builder() defaults to turnPage; restore turnPageItem if the failed record had it.
        if (crawlerRecord.tagsCreator().requestTags().hasRequestType(turnPageItem)) {
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(turnPage);
            crawlerRequestRecord.tagsCreator().requestTags().addRequestType(turnPageItem);
        }
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
    }

    /**
     * 检查页面下载是否成功、完整 (checks that the page downloaded successfully and completely).
     *
     * @param crawlerRequestRecord last record
     * @param httpPage             page
     * @return true when the page is bad (non-200, failed, or blank body) and must be retried
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        int statusCode = httpPage.getStatusCode();
        if (statusCode != 200) {
            logger.error("download page {} error, status code is {}", lastRequestUrl, statusCode);
            return true;
        }
        if (!httpPage.isDownloadSuccess()) {
            logger.error("download page failed, check your link {}", lastRequestUrl);
            return true;
        }
        if (StringUtils.isBlank(httpPage.getRawText())) {
            logger.error("download page empty, check your link {}", lastRequestUrl);
            return true;
        }
        return false;
    }
}
