package com.chance.cc.crawler.development.scripts.iqilu;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2020/12/2 16:34
 * @Description
 *      齐鲁网
 **/
public class IqiluCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(IqiluCrawlerScript.class);

    /** Site entrance page. */
    public static final String iqiluEntranceUrl = "http://www.iqilu.com/";
    /** News-module entrance page. */
    public static final String moduleEntranceUrl = "http://news.iqilu.com/";
    /** Section pages under the news module, e.g. http://news.iqilu.com/shandong/yaowen/ */
    public static final String moduleArticleUrl = "http://news.iqilu.com/[a-zA-Z0-9\\/]+/";
    /** Paged section lists, e.g. .../list_12_2.shtml */
    public static final String nextModuleArticleUrl = "http://\\S+.iqilu.com/\\S*/list_\\d+_\\d+.shtml";
    /**
     * Article detail pages, e.g. .../2020/1202/4712345.shtml.
     * NOTE(review): "\\d{4}\\/*\\d{4}" tolerates zero or more slashes between the two
     * four-digit groups; kept as-is so the set of matched URLs does not change.
     */
    public static final String articleUrl = "http://\\S+.iqilu.com/\\S*/\\d{4}\\/*\\d{4}/\\d*.shtml";

    /**
     * Script domain identifier.
     *
     * @return the domain key "iqilu"
     */
    @Override
    public String domain() {
        return "iqilu";
    }

    /**
     * Registers the URL patterns this script accepts.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(iqiluEntranceUrl);
        addUrlRegular(moduleEntranceUrl);
        addUrlRegular(moduleArticleUrl);
        addUrlRegular(nextModuleArticleUrl);
        addUrlRegular(articleUrl);
    }

    /**
     * Pre-flight check before the script runs; all records are accepted.
     *
     * @param crawlerRequestRecord the incoming record
     * @return always {@code true}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }

    /**
     * Extracts follow-up links from a downloaded page.
     * <p>
     * Routing: entrance page → module entrances → section pages → paged lists
     * and article detail links.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return the parsed follow-up request records (possibly empty, never null)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        String requestUrl = httpPage.getRequest().getUrl();
        String webSite = subDomainOf(requestUrl);

        if (requestUrl.matches(iqiluEntranceUrl)) {
            // Site entrance: collect module entrances from the nav bar.
            List<String> all = httpPage.getHtml().xpath("//div[@class=\"nav-part-a-link\"]/a/@href").all();
            for (String url : all) {
                if (url.matches(moduleEntranceUrl)) {
                    parsedLinks.add(turnPageRecord(crawlerRequestRecord, url));
                }
            }
        } else if (requestUrl.matches(moduleEntranceUrl)) {
            // Module entrance: collect section pages on the same sub-domain only.
            List<String> all = httpPage.getHtml().xpath("//ul[@class=\"wrapper\"]/li/a/@href").all();
            for (String url : all) {
                // Guarded extraction: relative or malformed hrefs yield "" and are skipped
                // (the original substring call could throw StringIndexOutOfBoundsException).
                String webSiteUrl = subDomainOf(url);
                if (!webSiteUrl.isEmpty() && webSiteUrl.equals(webSite) && url.matches(moduleArticleUrl)) {
                    parsedLinks.add(turnPageRecord(crawlerRequestRecord, url));
                }
            }
        } else if (requestUrl.matches(moduleArticleUrl) || requestUrl.matches(nextModuleArticleUrl)) {
            // Parse sub-sections of this section page.
            if (requestUrl.matches(moduleArticleUrl)) {
                List<String> all = httpPage.getHtml().xpath("//div[@class=\"mod-list\"]/div[@class=\"tit-sub\"]/h2/a/@href").all();
                for (String url : all) {
                    if (url.matches(moduleArticleUrl) && !url.equals(requestUrl)) {
                        parsedLinks.add(turnPageRecord(crawlerRequestRecord, url));
                    }
                }
            }

            // Parse the next-page URL.
            String nextPageUrl = httpPage.getHtml().xpath("//a[@class=\"next\"]/@href").get();
            if (StringUtils.isNotEmpty(nextPageUrl)) {
                CrawlerRequestRecord nextRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .recordKey(nextPageUrl)
                        .httpUrl(nextPageUrl)
                        .needWashed(false)
                        .releaseTime(System.currentTimeMillis())
                        .build();
                parsedLinks.add(nextRecord);
            }

            // Parse the article list. BUGFIX: this used to run only when a "next" link
            // existed, so the articles on the LAST page of a section were never collected.
            List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"news-pic-item\"]").nodes();
            for (Selectable node : nodes) {
                String itemUrl = node.xpath(".//h3/a/@href").get();
                if (StringUtils.isEmpty(itemUrl)) {
                    continue;
                }

                String releaseTime = node.xpath(".//span[@class=\"time\"]/text()").get();
                if (StringUtils.isEmpty(releaseTime)) {
                    continue;
                }

                try {
                    long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime();

                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(itemUrl)
                            .httpUrl(itemUrl)
                            .needParsed(false)
                            .resultLabelTag(article)
                            .releaseTime(releaseTimeToLong)
                            .build();

                    parsedLinks.add(itemRecord);
                } catch (ParseException e) {
                    // Skip items with an unparseable date; keep crawling the rest.
                    log.warn("unparseable release time \"{}\" for item {}", releaseTime, itemUrl, e);
                }
            }
        }

        return parsedLinks;
    }

    /**
     * Washes a downloaded page into result data.
     *
     * @param crawlerRecord the record that produced this page
     * @param page          the downloaded page
     * @return washed results (possibly empty, never null and never containing null)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        int statusCode = page.getStatusCode();
        if (statusCode == 200) {
            if (crawlerResultTags.hasDataType(article)) {
                CrawlerData washed = washArticle(crawlerRecord, page);
                // washArticle returns null on missing/unparseable fields; don't add nulls.
                if (washed != null) {
                    crawlerDataList.add(washed);
                }
            }
        } else {
            log.info("该文章(“{}”)下载错误，错误码：{}", page.getRequest().getUrl(), statusCode);
        }

        return crawlerDataList;
    }

    /**
     * Washes one article detail page into a {@link CrawlerData}.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded article page
     * @return the washed article, or {@code null} when mandatory fields
     *         (title / release time) are missing or the date cannot be parsed
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();

        String title = httpPage.getHtml().xpath("//h1/text()").get();
        String source = httpPage.getHtml().xpath("//p[@class=\"resource\"]/span/text()").get();
        String releaseTime = httpPage.getHtml().xpath("//p[@class=\"time\"]/text()").get();
        String author = httpPage.getHtml().xpath("//div[@class=\"info\"]/p[@class=\"author\"]/span/text() ").get();
        // Strip the leading "作者：" label when present; author is mostly null.
        author = StringUtils.isNotEmpty(author) ? (author.contains("作者：") ? author.substring(author.indexOf("：") + 1) : author) : "";

        // Guard mandatory fields: the original NPE'd on a missing title and threw an
        // uncaught IllegalArgumentException on a missing release time.
        if (StringUtils.isEmpty(title) || StringUtils.isEmpty(releaseTime)) {
            log.warn("missing title or release time, skip article {}", itemUrl);
            return null;
        }

        List<String> articleTextList = httpPage.getHtml().xpath("//div[@class=\"article-main\"]//p[not(@class)]//text()").all();
        StringBuilder contents = new StringBuilder();
        for (String articleText : articleTextList) {
            if (StringUtils.isNotEmpty(articleText)) {
                contents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
            }
        }

        // The trailing numeric part of the URL ("/4712345.shtml") keys the article.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        try {
            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm:ss").getTime())
                    .addContentKV(Field_Content, contents.toString().trim())
                    .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(title).trim())
                    .addContentKV(Field_Source, source)
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            log.warn("unparseable release time \"{}\" for article {}", releaseTime, itemUrl, e);
            return null;
        }
    }

    /**
     * Builds a turn-page (navigation) request record for {@code url}.
     * One place for the stanza that was repeated three times in parseLinks.
     */
    private CrawlerRequestRecord turnPageRecord(CrawlerRequestRecord parent, String url) {
        return CrawlerRequestRecord.builder()
                .turnPageRequest(parent)
                .httpUrl(url)
                .recordKey(url)
                .notFilterRecord()
                .releaseTime(System.currentTimeMillis())
                .build();
    }

    /**
     * Extracts the sub-domain part of an absolute URL, i.e. the text between
     * "//" and the first ".". Returns "" for relative or malformed URLs instead
     * of throwing StringIndexOutOfBoundsException.
     */
    private static String subDomainOf(String url) {
        int schemeEnd = url.indexOf("//");
        int firstDot = url.indexOf('.');
        if (schemeEnd < 0 || firstDot <= schemeEnd + 2) {
            return "";
        }
        return url.substring(schemeEnd + 2, firstDot);
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }
}
