package com.chance.cc.crawler.development.scripts.china;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2020/12/17 10:38
 * @Description 中华网
 **/
public class ChinaCrawlerScript extends CrawlerCommonScript {

    // Stateless and shared: one logger per class, not per instance.
    private static final Logger log = LoggerFactory.getLogger(ChinaCrawlerScript.class);

    // URL patterns. Literal dots in host names are escaped so the host part
    // cannot match arbitrary characters (e.g. "chinaXcom").
    private static final String chinaPrefix = "https://\\S*\\.china\\.com";
    private static final String chinaEntranceUrl = "https://www.china.com/";
    private static final String moduleEntranceUrl = "https://(news|military)\\.china\\.com/*";
    // A module list page: ".../section", ".../section/", ".../section/index.html" or
    // ".../index.html". The original "[index\.html]*" was a character class that
    // matched any run of the letters i,n,d,e,x,h,t,m,l and '.'.
    private static final String moduleArticleUrl = "https://\\S*\\.china\\.com/(?:[a-zA-Z0-9]+/*(?:index\\.html)?|index\\.html)";
    private static final String articleUrl = "https://\\S*\\.china\\.com/\\S*/[0-9_]+\\.html";
    private static final String jsonArticleUrl = "https://\\S*\\.china\\.com/\\S*\\?page=\\d+\\S*id=\\S*";

    /**
     * Domain this script handles.
     *
     * @return the crawler domain key, "china"
     */
    @Override
    public String domain() {
        return "china";
    }

    /**
     * Registers the URL regular expressions that route requests into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(chinaEntranceUrl);
        addUrlRegular(moduleEntranceUrl);
        addUrlRegular(moduleArticleUrl);
        addUrlRegular(articleUrl);
        addUrlRegular(jsonArticleUrl);
    }

    /**
     * Input validation hook: decides whether the record may run through this script.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return always {@code true} — every routed record is accepted
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }


    /**
     * Extracts follow-up requests from a downloaded page.
     *
     * <p>Dispatch by URL shape: site entrance → module entrances; module entrance →
     * list pages (HTML or JSON feed); HTML list → articles; article → extra pages of
     * a multi-page article; JSON feed → next feed page + articles.</p>
     *
     * @param crawlerRequestRecord the record that produced {@code httpPage}
     * @param httpPage             the downloaded page (HTML or JSON)
     * @return the parsed follow-up requests; empty when the download failed
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        String requestUrl = httpPage.getRequest().getUrl();
        // Sub-domain of the request, e.g. "www", "news" or "military".
        String webSite = requestUrl.substring(requestUrl.indexOf("//") + 2, requestUrl.indexOf("."));

        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() != 200) {
            log.error("页面(“" + requestUrl + "”)下载失败！");
            return parsedLinks;
        }

        if (requestUrl.matches(chinaEntranceUrl)) {
            // Site entrance: collect the module entrance links (protocol-relative hrefs).
            List<String> all = httpPage.getHtml().xpath("//div[@class=\"con clearfix\"]//a/@href").all();
            for (String url : all) {
                url = StringUtils.isNotEmpty(url) ? "https:" + url : "";
                if (url.matches(moduleEntranceUrl)) {
                    parsedLinks.add(CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(url)
                            .recordKey(url)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .build());
                }
            }
        } else if (requestUrl.matches(moduleEntranceUrl)) {
            // Module entrance: collect list-page links. news.china.com exposes an HTML
            // nav; military.china.com exposes relative column names that map to a JSON
            // "more" feed per column.
            List<String> all = new ArrayList<>();
            if ("military".equals(webSite)) {
                all = httpPage.getHtml().xpath("//div[@class=\"w1200\"]/a/@href").all();
            } else if ("news".equals(webSite)) {
                all = httpPage.getHtml().xpath("//div[@id=\"newsNav\"]/a/@href").all();
            }
            for (String url : all) {
                if (StringUtils.isEmpty(url)) {
                    continue;
                }
                if ("news".equals(webSite)) {
                    url = "https:" + url;
                } else if ("military".equals(webSite)) {
                    // Relative column link like "/news" → JSON feed of that column.
                    String module = url.substring(1);
                    switch (module) {
                        case "news": url = "https://military.china.com/news/ajax_column_more?page=1&qudao=&column_pid=568&column=news"; break;
                        case "global": url = "https://military.china.com/news/ajax_column_more?page=1&qudao=&column_pid=569&column=global"; break;
                        case "topic": url = "https://military.china.com/news/ajax_column_more?page=1&qudao=&column_pid=11166766&column=topic"; break;
                        case "history": url = "https://military.china.com/news/ajax_column_more?page=1&qudao=&column_pid=62&column=history"; break;
                        case "jxkt": url = "https://military.china.com/news/ajax_column_more?page=1&qudao=&column_pid=11103358&column=jxkt"; break;
                    }
                }
                if (url.matches(moduleArticleUrl) || url.matches(jsonArticleUrl)) {
                    parsedLinks.add(CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(url)
                            .recordKey(url)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .build());
                }
            }
        } else if (requestUrl.matches(moduleArticleUrl)) {
            // HTML article list: one request per article that has both a link and a
            // parseable release time.
            List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"item-con-inner\"]|//div[contains(@class,'item-focus')]|//div[@class=\"defaultList\"]/div").nodes();
            for (Selectable node : nodes) {
                String itemUrl = node.xpath("./h3/a/@href|./a/@href").get();
                if (StringUtils.isEmpty(itemUrl)) {
                    continue;
                }

                // Bug fix: the original re-checked itemUrl here instead of releaseTime,
                // so a node without a time span caused an NPE in the format check.
                String releaseTime = node.xpath(".//span[@class=\"time\"]").get();
                Long releaseTimeMillis = parseReleaseTime(releaseTime);
                if (releaseTimeMillis == null) {
                    continue;
                }

                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(releaseTimeMillis)
                        .build());
            }
        } else if (requestUrl.matches(articleUrl)) {
            // Article page: if it is paginated, download the extra pages internally;
            // otherwise mark the record as a complete article for washing.
            List<Selectable> sumPage = httpPage.getHtml().xpath("//div[@id=\"chan_multipageNumN\"]/a").nodes();
            if (sumPage != null && sumPage.size() > 0) {
                for (Selectable selectable : sumPage) {
                    String text = selectable.xpath("./text()").get();
                    // Numeric anchors are page numbers; ignore "next"/"prev" style links.
                    if (text.matches("\\d+")) {
                        String url = selectable.xpath("./@href").get();
                        // Page hrefs are relative to the article's directory.
                        url = StringUtils.isNotEmpty(url) ? requestUrl.substring(0, requestUrl.lastIndexOf("/") + 1) + url : "";
                        if (StringUtils.isNotEmpty(url)) {
                            parsedLinks.add(CrawlerRequestRecord.builder()
                                    .itemPageRequest(crawlerRequestRecord)
                                    .httpUrl(url)
                                    .recordKey(url)
                                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                                    .needParsed(false)
                                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                                    .build());
                        }
                    }
                }
            } else {
                crawlerRequestRecord.tagsCreator().resultTags().addResultDataType(CrawlerEnum.CrawlerDataType.article);
            }
        } else if (requestUrl.matches(jsonArticleUrl)) {
            // JSON feed: build the next-page URL by incrementing the "page" parameter.
            String[] split = requestUrl.split("\\?");
            String[] params = split[1].split("&");
            StringBuilder nextPage = new StringBuilder(split[0]).append("?");
            for (int i = 0; i < params.length; i++) {
                String param = params[i];
                if (param.matches("page=\\d+")) {
                    String[] kv = param.split("=");
                    param = kv[0] + "=" + (Integer.parseInt(kv[1]) + 1);
                }
                if (i > 0) {
                    nextPage.append("&");
                }
                nextPage.append(param);
            }
            String nextPageUrl = nextPage.toString();

            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .build());

            // Article list entries inside the JSON feed. Item URLs are site-relative,
            // so prepend the "https://xxx.china.com" prefix of the current request.
            List<String> all = httpPage.getJson().jsonPath($_type + ".content").all();
            for (String data : all) {
                Json json = new Json(data);
                String itemUrl = json.jsonPath($_type + ".url").get();
                if (StringUtils.isEmpty(itemUrl)) {
                    continue;
                }
                itemUrl = getString(chinaPrefix, requestUrl) + itemUrl;

                String releaseTime = json.jsonPath($_type + ".delivery_time").get();
                Long releaseTimeMillis = parseReleaseTime(releaseTime);
                if (releaseTimeMillis == null) {
                    continue;
                }

                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(releaseTimeMillis)
                        .build());
            }
        }

        return parsedLinks;
    }

    /**
     * Parses a release time in "yyyy-MM-dd HH:mm" or "yyyy-MM-dd HH:mm:ss" form.
     *
     * <p>Replaces two duplicated inline blocks; the originals passed an empty
     * pattern to {@code DateUtils.parseDate} for unrecognized formats, which threw
     * an uncaught {@link IllegalArgumentException} and aborted link parsing.</p>
     *
     * @param releaseTime the raw time text, may be null/empty
     * @return epoch milliseconds, or {@code null} when the text is blank or not in a known format
     */
    private Long parseReleaseTime(String releaseTime) {
        if (StringUtils.isEmpty(releaseTime)) {
            return null;
        }
        String parsePattern;
        if (releaseTime.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}")) {
            parsePattern = "yyyy-MM-dd HH:mm";
        } else if (releaseTime.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}")) {
            parsePattern = "yyyy-MM-dd HH:mm:ss";
        } else {
            log.error("时间格式错误！正确的时间是：{}", releaseTime);
            return null;
        }
        try {
            return DateUtils.parseDate(releaseTime, parsePattern).getTime();
        } catch (ParseException e) {
            log.error("时间格式错误！正确的时间是：{}", releaseTime);
            return null;
        }
    }

    /**
     * Merges the body text of internally downloaded extra pages of a paginated
     * article into the record's extras, then marks the record as an article.
     *
     * @param crawlerRecord           the main article record
     * @param internalDownloadRecords the extra article pages downloaded internally
     * @param links                   unused here
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        // Single-threaded accumulation: StringBuilder suffices (no need for StringBuffer).
        StringBuilder contents = new StringBuilder();
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            List<String> articleTextList = internalDownloadRecord.getInternalDownloadPage().getHtml().xpath("//div[@id=\"chan_newsDetail\"]/p//text()").all();
            for (String articleText : articleTextList) {
                contents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
            }
        }
        Map<String, Object> extras = new HashMap<>();
        extras.put(Field_Content, contents.toString());
        crawlerRecord.getHttpRequest().setExtras(extras);
        crawlerRecord.tagsCreator().resultTags().addResultDataType(CrawlerEnum.CrawlerDataType.article);
    }

    /**
     * Washes a downloaded page into result data according to its result tags.
     *
     * @param crawlerRecord the record being washed
     * @param page          the downloaded page
     * @return the washed data items (currently only articles)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        if (crawlerResultTags.hasDataType(CrawlerEnum.CrawlerDataType.article)) {
            crawlerDataList.add(washArticle(crawlerRecord, page));
        }

        // todo
        return crawlerDataList;
    }


    /**
     * Extracts title, author, body text and release time from an article page and
     * builds the {@link CrawlerData} result.
     *
     * @param crawlerRequestRecord the article record (may carry extra-page content in extras)
     * @param httpPage             the article page
     * @return the washed article, or {@code null} when the release time cannot be parsed
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {

        String title = httpPage.getHtml().xpath("//h1[@id=\"chan_newsTitle\"]|//h1[@class=\"article-main-title\"]").get();
        String author = httpPage.getHtml().xpath("//span[@class=\"source\"]//text()").get();

        List<String> articleTextList = httpPage.getHtml().xpath("//div[@id=\"chan_newsDetail\"]/p//text()").all();
        StringBuilder contents = new StringBuilder();
        for (String articleText : articleTextList) {
            contents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
        }
        // Append the text of extra pages collected in afterInternalDownload, if any.
        Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
        if (extras != null) {
            contents.append(extras.get(Field_Content));
        }

        String releaseTime = httpPage.getHtml().xpath("//div[@class=\"chan_newsInfo_source\"]/span[@class=\"time\"]/text()|//div[@class=\"time-source\"]/span[@class=\"time\"]/text()").get();

        String itemUrl = httpPage.getRequest().getUrl();
        // Unique key: the numeric file name of the article URL (".../1234_5.html" -> "1234_5").
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        CrawlerData crawlerData = null;
        try {
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    // Compile-checked enum constant instead of valueOf("article").
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .url(itemUrl)
                    // List pages carry both second- and minute-precision times; accept both
                    // here too (the original only accepted "yyyy-MM-dd HH:mm:ss").
                    .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss", "yyyy-MM-dd HH:mm").getTime())
                    .addContentKV(Field_Content, contents.toString())
                    .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(title))
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            log.error("文章(“" + itemUrl + "”)时间格式错误!正确时间是：" + releaseTime);
        }
        return crawlerData;
    }

    /**
     * Post-execution hook — intentionally empty, nothing to clean up.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Returns the first substring of {@code input} matching {@code regx}.
     *
     * @param regx  the regular expression to search for
     * @param input the text to search
     * @return the first match, or {@code null} when there is none
     */
    private static String getString(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group(0) : null;
    }

}
