package com.chance.cc.crawler.development.scripts.yyjjb;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.development.scripts.yiyaoxw.YiYaoZXCrawlerScript;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URLEncoder;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.turnPage;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.turnPageItem;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * created by ljl 20210630
 * 医药经济报 (Medicine Economic News) — keyword-search section crawler.
 *
 * Flow: a meta endpoint supplies keywords ({@link #keysRegex}) → one search-list
 * request per keyword → list pages yield next-page + article-item requests →
 * article pages are washed into {@code CrawlerData}.
 */
public class YjjjbSearchCrawlerScript extends CrawlerCommonScript {

    private static final Logger logger = LoggerFactory.getLogger(YjjjbSearchCrawlerScript.class);

    public static final String domain = "yyjjbSearch";
    public static final String scriptSite = "news";

    /** Internal meta endpoint that supplies the keyword list for this domain. */
    public static final String keysRegex = "https?://\\S*v1/meta/" + domain + "/keys\\S*";

    /** Search entry page (any query string). */
    public static final String listUrlRegex = "http://www.yyjjb.com.cn/search\\S*";

    /** Search-result list page: page number first, then the keyword. */
    public static final String searchListRegex = "http://www.yyjjb.com.cn/search\\?page=\\d+&keyword=\\S*";
    /** Article detail URL, layout 1 (print-edition style path). */
    public static final String articleUrlRegex1 = "http://www.yyjjb.com.cn/yyjjb/\\d{6}/\\d+_\\d+\\.shtml";
    /** Article detail URL, layout 2 (short date path). */
    public static final String articleUrlRegex2 = "http://www\\.yyjjb\\.com\\.cn/\\d{2}/\\d{2}/\\d+_\\d+\\.shtml";

    /** List URL template; the URL-encoded keyword is appended after "keyword=". */
    public static final String searchListFormat = "http://www.yyjjb.com.cn/search?page=%s&keyword=";

    /**
     * Builds the initial list requests from the keyword support records.
     * Falls back to the default behavior when no support records are given.
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> allItemRecords = new ArrayList<>();
        if (null == supportSourceRecords || supportSourceRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String supportUrl = supportSourceRecord.getHttpRequest().getUrl();
            if (supportUrl.matches(keysRegex)) {
                initKeywordsRecord(allItemRecords, requestRecord, supportSourceRecord);
            }
        }
        return allItemRecords;
    }

    /**
     * Dispatches link extraction: only search-result list pages produce links;
     * article detail pages are marked {@code needParsed(false)} at creation time.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String lastRequestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        if (lastRequestUrl.matches(searchListRegex)) {
            return parseSearchLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Parses one search-result list page: emits a turn-page record for the next
     * page plus one item record per result entry.
     */
    private List<CrawlerRequestRecord> parseSearchLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {

        String url = httpPage.getRequest().getUrl();
        List<Selectable> itemNodes = httpPage.getHtml().xpath("//div[@class=\"result-item\"]").nodes();
        if (null != itemNodes && !itemNodes.isEmpty()) {
            // Next page: the URL is guaranteed by searchListRegex to start with "page=<n>&keyword=...".
            // Extract the keyword by offset rather than split("=") so that '=' characters
            // inside the encoded keyword are not truncated.
            int pageNum = Integer.parseInt(url.split("&")[0].split("=")[1]);
            String encodedKeyword = url.substring(url.indexOf("keyword=") + "keyword=".length());
            String nextPageUrl = String.format(searchListFormat, (pageNum + 1)) + encodedKeyword;
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(nextPageRecord);

            // Item (article detail) records, one per result entry.
            for (Selectable itemNode : itemNodes) {
                String itemUrl = itemNode.xpath("./a/@href").get();
                if (StringUtils.isBlank(itemUrl)) {
                    continue;   // layout change or ad block — skip rather than build a bad URL
                }
                itemUrl = "http://www.yyjjb.com.cn" + itemUrl;
                String pubTime = itemNode.xpath("./p/a/span[1]/text()").get();
                String author = itemNode.xpath("./p/a/span[2]/text()").get();
                // Strip the 3-char "作者：" prefix; tolerate null/short values instead of throwing.
                author = (null != author && author.length() > 3) ? author.substring(3) : "";
                try {
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(itemUrl)
                            .recordKey(itemUrl)
                            .releaseTime(DateUtils.parseDate(pubTime, "发布时间：yyyy-MM-dd").getTime())
                            .copyBizTags()
                            .needParsed(false)
                            .resultLabelTag(article)
                            .build();
                    itemRecord.tagsCreator().bizTags().addCustomKV("author", author);
                    parsedLinks.add(itemRecord);
                } catch (Exception e) {
                    // Keep the stack trace — failures here usually mean a page-layout change.
                    logger.error(e.getMessage(), e);
                }
            }
        }
        return parsedLinks;
    }

    /**
     * Washes a downloaded article page into zero or one {@code CrawlerData}.
     * The article key is the filename stem of the URL (e.g. "12345_1" from ".../12345_1.shtml").
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String httpRequestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        String articleKey = httpRequestUrl.substring(httpRequestUrl.lastIndexOf("/") + 1).split("\\.")[0];
        if (StringUtils.isBlank(httpPage.getRawText())) {
            logger.error("httpPage is empty !");
            return crawlerDataList;
        }

        String site = crawlerRequestRecord.tagsCreator().bizTags().site();
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            crawlerDataList.add(washArticle(crawlerRequestRecord, httpPage, site, articleKey));
        }
        return crawlerDataList;
    }

    /**
     * Extracts title, content, images, author and release time from an article page.
     *
     * @return the built {@code CrawlerData}, or {@code null} when extraction fails
     */
    private CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, String site, String articleKey) {
        try {
            List<String> texts = httpPage.getHtml().xpath("//div[@class=\"detail-article\"]/p/text()").all();
            List<String> allImages = httpPage.getHtml().xpath("//div[@class=\"detail-article\"]/p/img/@src").all();
            // Platform-original articles wrap each paragraph in an extra <span>.
            if (!texts.isEmpty() && texts.get(0).startsWith("此内容为《医药经济报》融媒体平台原创")) {
                texts = httpPage.getHtml().xpath("//div[@class=\"detail-article\"]/p/span/text()").all();
                allImages = httpPage.getHtml().xpath("//div[@class=\"detail-article\"]/p/span/img/@src").all();
            }
            StringBuilder contents = new StringBuilder();
            for (String text : texts) {
                contents.append(text).append(" ");
            }
            String content = unescapeHtml2J(contents.toString().trim());

            StringBuilder sbImage = new StringBuilder();
            for (String image : allImages) {
                sbImage.append(image).append("\\x01");  // literal "\x01" acts as the image-list separator downstream
            }

            String releaseTimeStr = httpPage.getHtml().xpath("//div[@class=\"detail-title\"]/p/span[1]").get();

            String itemUrl = httpPage.getRequest().getUrl();

            String title = StringEscapeUtils.unescapeHtml(httpPage.getHtml().xpath("//div[@class=\"detail-title\"]/h2").get());

            String author = httpPage.getHtml().xpath("(//div[@class=\"detail-title\"]/p/span)[2]").get();
            // Only keep the value when it carries the expected "作者" prefix (3 chars incl. colon).
            author = (null != author && author.startsWith("作者")) ? author.substring(3) : "";

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", domain(), site, article.enumVal(), articleKey))
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTimeStr, "发布时间：yyyy-MM-dd HH:mm:ss").getTime())
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Images, sbImage.toString())
                    .build();
        } catch (Exception e) {
            // ParseException from the date, plus defensive NPEs from missing DOM nodes.
            logger.error(e.getMessage(), e);
        }
        return null;
    }

    /**
     * Turns the keyword meta response (JSON: {status, content:[{keyword}]}) into
     * one page-1 search-list request per keyword.
     */
    private void initKeywordsRecord(List<CrawlerRecord> allItemRecords, CrawlerRequestRecord requestRecord, CrawlerRequestRecord keywordRecord) {
        try {
            JSONObject jsonObject = JSONObject.parseObject(keywordRecord.getInternalDownloadPage().getRawText());
            if (jsonObject.getIntValue("status") == 0) {
                JSONArray objects = jsonObject.getJSONArray("content");
                for (Object object : objects) {
                    String keyword = ((JSONObject) object).getString("keyword");
                    // Encode explicitly as UTF-8; the no-charset overload is deprecated and
                    // platform-dependent, which matters for Chinese keywords.
                    String listUrl = String.format(searchListFormat, 1) + URLEncoder.encode(keyword, "UTF-8");
                    CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(requestRecord)
                            .httpUrl(listUrl)
                            .recordKey(listUrl)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .copyBizTags()
                            .build();
                    listRecord.tagsCreator().bizTags().addKeywords(keyword);
                    allItemRecords.add(listRecord);
                }
            }
        } catch (Exception e) {
            logger.error("from keywords init urls failed");
            logger.error(e.getMessage(), e);
        }
    }

    /** Registers the URL patterns this script is allowed to crawl. */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(listUrlRegex);        // search entry page
        addUrlRegular(searchListRegex);     // keyword search-result list page
        addUrlRegular(articleUrlRegex1);    // article detail, layout 1
        addUrlRegular(articleUrlRegex2);    // article detail, layout 2
    }

    /** Accepts only records tagged with this script's site; null-safe on the tag. */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String crawlerSite = crawlerRequestRecord.tagsCreator().bizTags().site();
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // no post-processing needed for this script
    }

    @Override
    public String domain() {
        return domain;
    }

    /**
     * Checks whether a page download succeeded and is complete.
     * Currently unused; kept as the hook for re-enabling download validation in parseLinks.
     *
     * @param crawlerRequestRecord last record
     * @param httpPage             page
     * @return {@code true} when the download FAILED (non-200, marked failed, or empty body)
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String lastRequestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        int statusCode = httpPage.getStatusCode();
        if (statusCode != 200) {
            logger.error("download page {} error, status code is {}", lastRequestUrl, statusCode);
            return true;
        }
        if (!httpPage.isDownloadSuccess()) {
            logger.error("download page failed, check your link {}", lastRequestUrl);
            return true;
        }
        if (StringUtils.isBlank(httpPage.getRawText())) {
            logger.error("download page empty, check your link {}", lastRequestUrl);
            return true;
        }
        return false;
    }

    /**
     * Repeatedly HTML-unescapes a string to undo double/triple-encoded entities,
     * capped at 6 passes to guard against strings that always contain '&' and ';'.
     */
    public static String unescapeHtml2J(String str) {
        if (null == str) {
            return null;
        }
        int times = 0;
        while (str.contains("&") && str.contains(";")) {
            str = StringEscapeUtils.unescapeHtml(str);
            times++;
            if (times > 5) {
                break;
            }
        }
        return str;
    }

}
