package com.chance.cc.crawler.development.scripts.cnr;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * Crawler script for CNR (China National Radio, cnr.cn): homepage, regional
 * (Shanghai) channel, news channel, and article pages.
 *
 * @author Zhao.Hhuan
 * @since 2020/11/23 18:27
 **/
public class CnrCrawlerScript extends CrawlerCommonScript {

    private Logger log = LoggerFactory.getLogger(CnrCrawlerScript.class);

    public static final String entranceUrl = "http://www.cnr.cn/";//入口地址
    public static final String localEntranceUrl = "http://www.cnr.cn/shanghai/";//地区地址
    public static final String localModelUrl = "http://www.cnr.cn/shanghai/shzx/[a-zA-Z]+/|http://www.cnr.cn/shanghai/shzx/[a-zA-Z]+/index_\\d+.html";//地区模块地址
    public static final String modelEntranceUrl = "http://news.cnr.cn/";//模块地址
    public static final String articleUrl = "http://\\S*\\.cnr.cn/\\S*/\\d{8}/t\\d{8}_\\d+.shtml";


    /**
     * 脚本domain定义
     *
     * @return
     */
    @Override
    public String domain() {
        return "cnr";
    }

    /**
     * 进入脚本的正则列表
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(entranceUrl);
        addUrlRegular(localEntranceUrl);
        addUrlRegular(localModelUrl);
        addUrlRegular(articleUrl);
        addUrlRegular(modelEntranceUrl);
    }

    /**
     * 是否执行脚本 ： 输入数据检查，合格的才进入脚本
     *
     * @param crawlerRequestRecord
     * @return
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }


    /**
     * 解析链接方法
     *
     * @param crawlerRequestRecord
     * @param httpPage
     * @return
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<CrawlerRequestRecord>();

        String requestUrl = httpPage.getRequest().getUrl();

        if (requestUrl.matches(entranceUrl)) {
            List<String> nodes = httpPage.getHtml().xpath("//div[@class=\"sites clearfix\"]/a/@href").all();//地区地址
            String modelSiteId = "";
            for (String itemUrl : nodes) {
                if(itemUrl.matches(localEntranceUrl)){
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(itemUrl)
                            .recordKey(itemUrl)
                            .notFilterRecord()
                            .releaseTime(System.currentTimeMillis())
                            .build();

                    String[] split = itemUrl.split("/");
                    modelSiteId = split[split.length - 1];
                    parsedLinks.add(itemRecord);
                }
            }

            List<String> all = httpPage.getHtml().xpath("//span[@class=\"nav_tit\"]/a/@href").all();//模块地址
            for (String itemUrl : all) {
                if(itemUrl.matches(modelEntranceUrl)){
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(itemUrl)
                            .recordKey(itemUrl)
                            .notFilterRecord()
                            .releaseTime(System.currentTimeMillis())
                            .build();

                    modelSiteId = itemUrl.substring(itemUrl.indexOf("//") + 2,itemUrl.indexOf("."));
                    parsedLinks.add(itemRecord);
                }
            }
        }else if(requestUrl.matches(localEntranceUrl)){
            List<String> nodes = httpPage.getHtml().xpath("//span[@class=\"ml40\"]/a/@href").all();
            for (String itemUrl : nodes) {

                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .notFilterRecord()
                        .releaseTime(System.currentTimeMillis())
                        .build();
                parsedLinks.add(itemRecord);

            }
        }else if(requestUrl.matches(localModelUrl)){
            String nextPageUrl = "";
            if(requestUrl.matches("http://www.cnr.cn/shanghai/shzx/[a-zA-Z]+/")){
                nextPageUrl = requestUrl +"index_1.html";
            }else if(requestUrl.matches("http://www.cnr.cn/shanghai/shzx/[a-zA-Z]+/index_\\d+.html")){
                String[] split = requestUrl.split("_");
                String[] split1 = split[1].split("\\.");
                int count = Integer.parseInt(split1[0]) + 1;
                nextPageUrl = split[0] + "_" + count + "." + split1[1];
            }

            if(StringUtils.isNotEmpty(nextPageUrl)){
                CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .recordKey(nextPageUrl)
                        .httpUrl(nextPageUrl)
                        .releaseTime(System.currentTimeMillis())
                        .build();

                parsedLinks.add(turnPageRequest);
            }

            List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"item\"]").nodes();
            for (Selectable node : nodes) {
                String itemUrl =  node.xpath("./a/@href").get();
                if (StringUtils.isBlank(itemUrl)) {
                    continue;
                }

                String releaseTime = node.xpath(".//span[@class=\"publishTime\"]/text()").get();
                if (StringUtils.isBlank(releaseTime)) {
                    continue;
                }

                try {
                    Long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm").getTime();

                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(itemUrl)
                            .httpUrl(itemUrl)
                            .releaseTime(releaseTimeToLong)
                            .needParsed(false)
                            .resultLabelTag(article)
                            .build();

                    itemRecord.getHttpConfig().setCircularRedirectsAllowed(true);
                    parsedLinks.add(itemRecord);
                } catch (ParseException e) {
                    e.printStackTrace();
                }
            }
        }else if(requestUrl.matches(modelEntranceUrl)){
            //文章列表解析
            List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"item\"]").nodes();
            for (Selectable node : nodes) {
                String itemUrl = node.xpath("./a/@href").get();
                if (StringUtils.isBlank(itemUrl)) {
                    continue;
                }

                String releaseTime = node.xpath(".//span[@class=\"publishTime\"]/text()").get();
                if (StringUtils.isBlank(releaseTime)) {
                    continue;
                }

                try {
                    Long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime();

                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(itemUrl)
                            .httpUrl(itemUrl)
                            .releaseTime(releaseTimeToLong)
                            .needParsed(false)
                            .resultLabelTag(article)
                            .build();

                    itemRecord.getHttpConfig().setCircularRedirectsAllowed(true);
                    parsedLinks.add(itemRecord);
                } catch (ParseException e) {
                    e.printStackTrace();
                }
            }
        }
        return parsedLinks;
    }

    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        if (crawlerResultTags.hasDataType(article)) {
            crawlerDataList.add(washArticle(crawlerRecord, page));

        }

        // todo
        return crawlerDataList;
    }

    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<String> articleTextList = httpPage.getHtml().xpath("//div[@class=\"TRS_Editor\"]//p//text()|//div[@class=\"TRS_Editor\"]//span//text()|//div[@class=\"article-body\"]//p//text()").all();
        if(articleTextList.size() == 0){
            articleTextList = httpPage.getHtml().xpath("//div[@class=\"Custom_UnionStyle\"]/div//text()").all();
        }

        StringBuffer conents = new StringBuffer();
        for (String articleText : articleTextList) {
            conents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
        }

        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));

        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"source\"]/span").nodes();
        String author = "";
        String releaseTime = "";
        for (Selectable node : nodes) {
            String data = node.xpath(".//text()").get();
            if(data.matches("来源：\\S*")){
                author = data.substring(data.indexOf("：") + 1);
            }else if(data.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}")){
                releaseTime = data;
            }
        }

        String title = httpPage.getHtml().xpath("//div[@class=\"article-header\"]/h1").get();

        CrawlerData crawlerData = null;
        try {
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                    .addContentKV(Field_Content, conents.toString().trim())
                    .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(title))
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            e.printStackTrace();
        }
        return crawlerData;

    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }



}
