package com.chance.cc.crawler.development.scripts.chinanews;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2020/11/27 17:35
 * @Description Crawler script for chinanews.com (China News Service): walks the
 * scrolling-news channel and the Shanghai regional sub-site, extracts article
 * links with release times, and washes article pages into structured data.
 **/
public class ChinanewsCrawlerScript extends CrawlerCommonScript {

    // Logger is stateless and shared — make it static final instead of per-instance.
    private static final Logger log = LoggerFactory.getLogger(ChinanewsCrawlerScript.class);

    /** Prefix of a regional sub-site, e.g. http://www.sh.chinanews.com/ */
    public static final String localPrefix = "(http|https)://www.[a-zA-Z]*.chinanews.com/";
    /** Site entrance. */
    public static final String entranceUrl = "http\\S*://www.chinanews.com/";
    /** Channel entrance (declared but intentionally not registered in initUrlRegulars). */
    public static final String modelEntranceUrl = "http\\S*://www.chinanews.com/[a-zA-Z]*/";
    /** Scrolling-news list pages, e.g. .../scroll-news/news12.html */
    public static final String scrollNewsModelEntranceUrl = "http\\S*://www.chinanews.com/scroll-news/news\\d+.html";
    /** Regional (Shanghai) site entrance. */
    public static final String localEntranceUrl = "http\\S*://www.sh.chinanews.com/";
    /** Channel index pages inside the regional site. */
    public static final String localModelUrl = "http://www.sh.chinanews.com/\\S+/index.shtml";
    /** Article pages reached from the scrolling-news lists. */
    public static final String scrollNewsArticleUrl = "http\\S*://www.chinanews.com/\\S*/\\d{4}/\\d{2}-\\d{2}/\\S*\\.shtml";
    /** Article pages inside the regional site. */
    public static final String localArticleUrl = "http://www.sh.chinanews.com/\\S+/\\d{4}-\\d{2}-\\d{2}/\\d+.shtml";

    /**
     * Domain identifier of this script.
     *
     * @return the fixed domain key "chinanews"
     */
    @Override
    public String domain() {
        return "chinanews";
    }

    /**
     * Registers the URL patterns that route requests into this script.
     * Note: modelEntranceUrl is deliberately not registered.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(entranceUrl);
        addUrlRegular(scrollNewsModelEntranceUrl);
        addUrlRegular(localEntranceUrl);
        addUrlRegular(localModelUrl);
        addUrlRegular(scrollNewsArticleUrl);
        addUrlRegular(localArticleUrl);
    }

    /**
     * Input-record gate; this script accepts every record that matched a pattern.
     *
     * @param crawlerRequestRecord the incoming record
     * @return always {@code true}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }

    /**
     * Extracts follow-up links from a fetched page, dispatching on which URL
     * pattern the request matched: site entrance, scrolling-news list,
     * regional entrance, or regional channel index.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return the parsed follow-up requests (possibly empty, never null)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();

        if (requestUrl.matches(entranceUrl)) {
            // Top navigation: pick out the scrolling-news channel entrances.
            List<String> modelLinks = httpPage.getHtml().xpath("//ul[@class=\"nav_navcon\"]/li/a/@href").all();
            for (String modelLink : modelLinks) {
                modelLink = "http:" + modelLink; // hrefs are protocol-relative
                if (modelLink.matches(scrollNewsModelEntranceUrl)) {
                    parsedLinks.add(CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(modelLink)
                            .recordKey(modelLink)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .build());
                }
            }

            // Regional sub-site entrances (only www.sh.* matches localEntranceUrl).
            List<String> localList = httpPage.getHtml().xpath("//div[@class=\"syfs4_left\"]/a/@href").all();
            for (String local : localList) {
                if (local.matches(localEntranceUrl)) {
                    parsedLinks.add(CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(local)
                            .recordKey(local)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .build());
                }
            }
        } else if (requestUrl.matches(scrollNewsModelEntranceUrl)) {
            // Next list page: .../newsN.html -> .../news(N+1).html. The matched
            // pattern guarantees the segment between "/news" and "." is digits.
            String[] split = requestUrl.split("/news");
            String[] split2 = split[1].split("\\.");
            String nextPageUrl = split[0] + "/news" + (Integer.parseInt(split2[0]) + 1) + "." + split2[1];

            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .build());

            // Article list entries.
            List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"content_list\"]/ul/li").nodes();
            for (Selectable node : nodes) {
                // Check the raw href BEFORE prefixing "http:", otherwise a missing
                // href becomes the non-blank string "http:null" and slips through.
                String href = node.xpath("./div[@class=\"dd_bt\"]/a/@href").get();
                if (StringUtils.isBlank(href)) {
                    continue;
                }
                String itemUrl = "http:" + href;

                String releaseTime = node.xpath("./div[@class=\"dd_time\"]/text()").get();
                if (StringUtils.isBlank(releaseTime)) {
                    continue;
                }

                try {
                    // List pages only show "MM-dd HH:mm"; the year lives in the URL path.
                    releaseTime = getString("\\d{4}", itemUrl) + "-" + releaseTime;
                    long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm").getTime();

                    parsedLinks.add(CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(itemUrl)
                            .httpUrl(itemUrl)
                            .releaseTime(releaseTimeToLong)
                            .needParsed(false)
                            .resultLabelTag(article)
                            .build());
                } catch (ParseException e) {
                    log.warn("unparseable release time '{}' for {}", releaseTime, itemUrl, e);
                }
            }
        } else if (requestUrl.matches(localEntranceUrl)) {
            // Regional channel tabs; hrefs are relative like "./xxx/index.shtml".
            List<String> modelLinks = httpPage.getHtml().xpath("//div[@class=\"tp_nav\"]/ul/li/a/@href").all();
            for (String modelLink : modelLinks) {
                modelLink = getString(localPrefix, requestUrl) + modelLink.substring(1);
                if (modelLink.matches(localModelUrl)) {
                    parsedLinks.add(CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(modelLink)
                            .recordKey(modelLink)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .build());
                }
            }
        } else if (requestUrl.matches(localModelUrl)) {
            // Regional article list.
            List<Selectable> nodes = httpPage.getHtml().xpath("//ul[@class=\"branch_list_ul paging\"]/li").nodes();
            for (Selectable node : nodes) {
                String href = node.xpath("./div[@class=\"con_title\"]/a/@href").get();
                if (StringUtils.isBlank(href)) {
                    continue;
                }
                String itemUrl = getString(localPrefix, requestUrl) + href;

                String releaseTime = node.xpath("./div[@class=\"date\"]/text()").get();
                if (StringUtils.isBlank(releaseTime)) {
                    continue;
                }

                try {
                    long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime();

                    parsedLinks.add(CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(itemUrl)
                            .httpUrl(itemUrl)
                            .releaseTime(releaseTimeToLong)
                            .needParsed(false)
                            .resultLabelTag(article)
                            .build());
                } catch (ParseException e) {
                    log.warn("unparseable release date '{}' for {}", releaseTime, itemUrl, e);
                }
            }
        }
        return parsedLinks;
    }

    /**
     * Washes an article page into crawler data, dispatching on which article
     * URL family the page belongs to. Wash methods may return {@code null}
     * (e.g. missing release time on a regional article), so results are
     * null-guarded before being collected.
     *
     * @param crawlerRecord the record that produced this page
     * @param page          the downloaded article page
     * @return washed data items (possibly empty, never containing null)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        String url = page.getRequest().getUrl();
        if (url.matches(scrollNewsArticleUrl) && crawlerResultTags.hasDataType(article)) {
            addIfNotNull(crawlerDataList, washScrollNewsArticle(crawlerRecord, page));
        } else if (url.matches(localArticleUrl) && crawlerResultTags.hasDataType(article)) {
            addIfNotNull(crawlerDataList, washLocalArticle(crawlerRecord, page));
        }
        return crawlerDataList;
    }

    /**
     * Washes a main-site (scrolling-news) article page.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded article page
     * @return the washed article data
     */
    public CrawlerData washScrollNewsArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String contents = joinParagraphs(httpPage.getHtml()
                .xpath("//div[@class=\"left_zw\"]/p//text()|//div[@class=\"content_desc\"]/p//text()").all());

        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = articleKeyOf(itemUrl);

        // Prefer the explicit source anchor; otherwise fall back to the
        // "来源：xxx" ("source: xxx") line and strip everything up to the colon.
        String author = httpPage.getHtml().xpath("//a[@class=\"source\"]/text()").get();
        if (StringUtils.isEmpty(author)) {
            author = httpPage.getHtml().xpath("//div[@class=\"left-t\"]//text()|//div[@class=\"left\"]/p/text()").get();
            author = StringUtils.isNotEmpty(author) && author.contains("来源：") ?
                    author.substring(author.lastIndexOf("：") + 1) : "";
        }

        // Two page layouts place the headline differently.
        String title = httpPage.getHtml().xpath("//div[@class=\"content\"]//h1").get();
        if (StringUtils.isEmpty(title)) {
            title = httpPage.getHtml().xpath("//div[@class=\"left\"]/h1").get();
        }

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .resultLabelTag(article)
                .url(itemUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_Content, contents)
                .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(title))
                .addContentKV(Field_Author, author)
                .build();
    }

    /**
     * Washes a regional-site article page.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded article page
     * @return the washed article data, or {@code null} when the release time
     *         is absent or cannot be parsed
     */
    public CrawlerData washLocalArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String contents = joinParagraphs(httpPage.getHtml()
                .xpath("//div[@class=\"cms-news-article-content-blocknew\"]/p//text()").all());

        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = articleKeyOf(itemUrl);

        // One header line carries both the timestamp and the "来源：xxx" source.
        String timeAndAuthor = httpPage.getHtml().xpath("//div[@class=\"cms-news-article-title-source\"]//text()").get();
        String author = StringUtils.isNotEmpty(timeAndAuthor) && timeAndAuthor.contains("来源：") ?
                timeAndAuthor.substring(timeAndAuthor.lastIndexOf("：") + 1) : "";

        String releaseTime = StringUtils.isNotEmpty(timeAndAuthor) ? getString("\\d{4}年\\d{2}月\\d{2}日 \\d{2}:\\d{2}", timeAndAuthor) : "";

        String title = httpPage.getHtml().xpath("//div[@class=\"cms-news-article-title\"]/span").get();

        if (StringUtils.isEmpty(releaseTime)) {
            log.warn("no release time found on local article {}", itemUrl);
            return null;
        }
        try {
            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, "yyyy年MM月dd日 HH:mm").getTime())
                    .addContentKV(Field_Content, contents)
                    .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(title))
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            log.warn("unparseable release time '{}' for {}", releaseTime, itemUrl, e);
            return null;
        }
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // Nothing to do after execution.
    }

    /** Adds {@code data} to {@code list} only when it is non-null. */
    private static void addIfNotNull(List<CrawlerData> list, CrawlerData data) {
        if (data != null) {
            list.add(data);
        }
    }

    /** Unescapes each paragraph and joins them with single spaces. */
    private static String joinParagraphs(List<String> paragraphs) {
        StringBuilder contents = new StringBuilder();
        for (String paragraph : paragraphs) {
            contents.append(StringEscapeUtils.unescapeHtml4(paragraph)).append(" ");
        }
        return contents.toString().trim();
    }

    /** Derives the article key from a URL: the last path segment without its extension. */
    private static String articleKeyOf(String url) {
        return url.substring(url.lastIndexOf("/") + 1, url.lastIndexOf("."));
    }

    /**
     * Returns the first match of {@code regex} within {@code input}.
     *
     * @param regex the pattern to search for
     * @param input the string to search in
     * @return the first matched substring, or {@code null} if none
     */
    private static String getString(String regex, String input) {
        Matcher matcher = Pattern.compile(regex).matcher(input);
        return matcher.find() ? matcher.group() : null;
    }

}
