package com.chance.cc.crawler.development.scripts.moe;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * Crawler script for the official website of the Ministry of Education of China (moe.gov.cn).
 *
 * @Author Zhao.Hhuan
 * @Date Created 2020/12/11 13:55
 **/
public class MoeCrawlerScript extends CrawlerCommonScript {

    // static final: one logger per class, not per instance.
    private static final Logger log = LoggerFactory.getLogger(MoeCrawlerScript.class);

    /** Site root; also used to resolve relative article links. */
    private static final String moePrefix = "http://www.moe.gov.cn/";
    /** Home-page entrance URL. */
    private static final String moeEntranceUrl = "http://www.moe.gov.cn/";
    /** Regex for a module (column) entrance page. */
    private static final String moduleEntranceUrl = "http://www.moe.gov.cn/[a-zA-Z0-9_/]*/";
    /** Regex for paginated list pages, e.g. ".../index_2.html" (literal dot escaped). */
    private static final String nextUrl = "http://www.moe.gov.cn/\\S*/index_\\d+\\.html";
    /** Regex for article detail pages, e.g. ".../202012/t20201211_123456.html". */
    private static final String articleUrl = "http://www.moe.gov.cn/\\S*/\\d+/t\\d+_\\d+\\.html";

    /** Module (column) paths seeded from the home page. */
    private static final List<String> modules = Arrays.asList(
            "jyb_xwfb/gzdt_gzdt/", "jyb_xwfb/s271/", "jyb_xwfb/s5147/", "jyb_xwfb/s5148/",
            "jyb_zwfw/zwfw_gdfw/", "jyb_hygq/hygq_zczx/moe_1346/", "jyb_sy/shizheng/",
            "jyb_sy/sy_jyyw/", "jyb_xxgk/s5743/s5744/", "jyb_sjzl/s3165/", "jyb_xwfb/s6192/s133/");

    /**
     * Domain identifier of this script.
     *
     * @return the fixed domain key "moe"
     */
    @Override
    public String domain() {
        return "moe";
    }

    /**
     * URL patterns that route requests into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(moeEntranceUrl);
        addUrlRegular(moduleEntranceUrl);
        addUrlRegular(nextUrl);
        addUrlRegular(articleUrl);
    }

    /**
     * Input-record gate: all records are accepted.
     *
     * @param crawlerRequestRecord incoming record
     * @return always true
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }

    /**
     * Parse follow-up links from a downloaded page.
     * <ul>
     *   <li>Home page → one turn-page record per configured module column.</li>
     *   <li>Module entrance / list page → next list page plus one item record per article.</li>
     * </ul>
     *
     * @param crawlerRequestRecord the record being processed
     * @param httpPage             the downloaded page
     * @return parsed follow-up request records (possibly empty, never null)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        if (!httpPage.isDownloadSuccess()) {
            return parsedLinks;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        int statusCode = httpPage.getStatusCode();
        if (statusCode != 200) {
            crawlerRequestRecord.setNeedWashPage(false);
            log.info("页面(“ {}”)下载错误！状态码：{}", requestUrl, statusCode);
            return parsedLinks;
        }

        if (requestUrl.matches(moeEntranceUrl)) {
            // Home page: seed one request per configured module column.
            for (String module : modules) {
                String url = moePrefix + module;
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(url)
                        .recordKey(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .build());
            }
        } else if (requestUrl.matches(moduleEntranceUrl) || requestUrl.matches(nextUrl)) {
            // Turn-page record for the next list page.
            String nextPageUrl = buildNextPageUrl(requestUrl);
            if (StringUtils.isNotEmpty(nextPageUrl)) {
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(nextPageUrl)
                        .recordKey(nextPageUrl)
                        .releaseTime(System.currentTimeMillis())
                        .build());
            }

            // Article list items.
            List<Selectable> nodes = httpPage.getHtml().xpath("//ul[@id=\"list\"]/li").nodes();
            for (Selectable node : nodes) {
                String href = node.xpath("./a/@href").get();
                // Keep only relative ".html" links (dot escaped; original "." matched any char).
                if (StringUtils.isEmpty(href) || !href.matches("[\\./]+\\S*\\.html")) {
                    continue;
                }
                String itemUrl = resolveItemUrl(requestUrl, href);
                String title = node.xpath("./a/@title").get();

                String releaseTime = node.xpath("span/text()").get();
                if (StringUtils.isEmpty(releaseTime)) {
                    continue;
                }

                try {
                    long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime();
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(itemUrl)
                            .recordKey(itemUrl)
                            .needParsed(false)
                            .resultLabelTag(article)
                            .releaseTime(releaseTimeToLong)
                            .build();

                    // Stash the list-page title for washArticle (detail pages are not re-parsed).
                    Map<String, Object> extras = new HashMap<>();
                    extras.put("title", title);
                    itemRecord.getHttpRequest().setExtras(extras);

                    parsedLinks.add(itemRecord);
                } catch (ParseException e) {
                    // Fixed: original message called the unparseable value the "correct" time
                    // and dropped the exception.
                    log.warn("时间格式错误！错误时间是：{}", releaseTime, e);
                }
            }
        }
        return parsedLinks;
    }

    /**
     * Compute the next list-page URL.
     * Module entrance ".../" → ".../index_1.html";
     * ".../index_N.html" → ".../index_(N+1).html".
     */
    private String buildNextPageUrl(String requestUrl) {
        if (requestUrl.matches(moduleEntranceUrl)) {
            return requestUrl + "index_1.html";
        }
        String[] split = requestUrl.split("index_");
        String[] nameAndExt = split[1].split("\\.");
        return split[0] + "index_" + (Integer.parseInt(nameAndExt[0]) + 1) + "." + nameAndExt[1];
    }

    /**
     * Resolve a relative list-page href to an absolute URL.
     * "./x.html" is relative to the current list directory; "../../x.html" to the site root.
     * Uses startsWith instead of the original String.matches whose unescaped dots also
     * matched unintended prefixes such as "../ss/".
     */
    private String resolveItemUrl(String requestUrl, String href) {
        if (href.startsWith("./")) {
            String tail = href.substring(2);
            return requestUrl.matches(moduleEntranceUrl)
                    ? requestUrl + tail
                    : requestUrl.substring(0, requestUrl.lastIndexOf("/") + 1) + tail;
        }
        if (href.startsWith("../../")) {
            return moePrefix + href.substring(6);
        }
        return href;
    }

    /**
     * Wash a downloaded page: article-tagged records are turned into article data.
     *
     * @param crawlerRecord the record being washed
     * @param page          the downloaded page
     * @return extracted data items (possibly empty, never null)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        if (page.getStatusCode() == 200 && crawlerResultTags.hasDataType(article)) {
            crawlerDataList.add(washArticle(crawlerRecord, page));
        }
        return crawlerDataList;
    }

    /**
     * Extract title, source and body text from an article detail page.
     *
     * @param crawlerRequestRecord the article record (carries release time and extras)
     * @param httpPage             the downloaded detail page
     * @return the assembled article data
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();

        // Title was stashed in the request extras by parseLinks; the list page's
        // <a> may lack a title attribute — default to "" (original NPE'd on trim()).
        Map<String, Object> extras = httpPage.getRequest().getExtras();
        String title = extras == null ? "" : StringUtils.trimToEmpty((String) extras.get("title"));

        // The s5744 column uses a different source element than the rest of the site.
        String source = "";
        if (itemUrl.contains("/jyb_xxgk/s5743/s5744/")) {
            source = httpPage.getHtml().xpath("//div[@id=\"detail-date-source\"]/text()").get();
        } else {
            String sourceAndTime = httpPage.getHtml()
                    .xpath("//div[@class=\"moe-detail-shuxing\"]/text()|//div[@id=\"content_date_source\"]").get();
            if (StringUtils.isNotEmpty(sourceAndTime)) {
                source = getString("来源：\\S*", sourceAndTime);
            }
        }
        if (StringUtils.isNotBlank(source)) {
            // Drop the "来源：" label, keep only the source name.
            source = source.substring(source.lastIndexOf("：") + 1);
        }

        // Join paragraph text nodes with single spaces (no shared state → StringBuilder).
        StringBuilder contents = new StringBuilder();
        for (String text : httpPage.getHtml().xpath("//div[@class=\"TRS_Editor\"]/p//text()").all()) {
            if (StringUtils.isNotEmpty(text)) {
                contents.append(text).append(" ");
            }
        }

        // Unique key: the numeric id between the last "_" and the extension.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .resultLabelTag(article)
                .url(itemUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_Content, contents.toString().trim())
                .addContentKV(Field_Title, title)
                .addContentKV(Field_Source, StringUtils.defaultString(source))
                .build();
    }

    /**
     * No post-execution work for this script.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // intentionally empty
    }

    /**
     * Return the first substring of {@code input} matching {@code regx}.
     * (Original kept an unused list and used a while-loop that returned on the
     * first iteration — equivalent to a single find.)
     *
     * @param regx  regular expression to search for
     * @param input text to search
     * @return the first match, or null when none
     */
    private static String getString(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group() : null;
    }

}
