package com.chance.cc.crawler.development.scripts.people;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2020/12/14 17:57
 * @Description Crawler script for People's Daily Online (人民网, people.com.cn):
 *              discovers channel/list pages, paginates them, and washes article detail pages.
 **/
public class PeopleCrawlerScript extends CrawlerCommonScript {

    // One logger per class, not per instance.
    private static final Logger log = LoggerFactory.getLogger(PeopleCrawlerScript.class);

    /** Regex for the scheme + host of any people.com.cn sub-site, e.g. "http://politics.people.com.cn". */
    private static final String peoplePrefix = "http://\\S*.people.com.cn";
    /** Portal home page. */
    private static final String peopleEntranceUrl = "http://people.com.cn/";
    /** Channel (module) home pages accepted as crawl entrances. */
    private static final String moduleEntranceUrl = "http://(politics|military|legal|society|industry|edu|culture|scitech|world|finance|health).people.com.cn/";
    /** Paginated article-list ("index") pages inside a channel. */
    private static final String moduleArticleUrl = "http://\\S+.people.com.cn/GB/\\S+/index\\d*.html";
    /** Individual article detail pages. */
    private static final String articleUrl = "http://\\S+.people.com.cn/\\S*/\\d{4}/\\d{4}/c\\d+-\\d+.html";

    /** Channel ids (second-to-last URL path segment) that must NOT be crawled. */
    private static final List<String> noCrawlerModules = Arrays.asList("8198","107182","57505","387261","376714","402602","387596","430031","391252","43331","431474",
            "420650","119388","229589","416757","239203","244541","421076","405419","430418","395441","420321","426579","420834","402064","432282","25509","61045","427072");

    /**
     * Domain key identifying this script.
     *
     * @return the script domain
     */
    @Override
    public String domain() {
        return "people";
    }

    /**
     * URL regexes that route requests into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(peopleEntranceUrl);
        addUrlRegular(moduleEntranceUrl);
        addUrlRegular(moduleArticleUrl);
        addUrlRegular(articleUrl);
    }

    /**
     * Input-record validation before the script runs; every record is accepted for this site.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return always {@code true}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }


    /**
     * Extracts follow-up links from a fetched page. The page kind is decided by matching the
     * request URL against the entrance / channel / list-page regexes, in that order.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded page
     * @return new request records to schedule (empty on non-200 responses)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        String requestUrl = httpPage.getRequest().getUrl();
        if (httpPage.getStatusCode() != 200) {
            return parsedLinks;
        }

        if (requestUrl.matches(peopleEntranceUrl)) {
            parseHomePage(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(moduleEntranceUrl)) {
            parseChannelPage(crawlerRequestRecord, httpPage, requestUrl, parsedLinks);
        } else if (requestUrl.matches(moduleArticleUrl)) {
            parseArticleListPage(crawlerRequestRecord, httpPage, requestUrl, parsedLinks);
        }
        return parsedLinks;
    }

    /** Queues the channel entrances (politics, military, ...) linked from the portal home page. */
    private void parseHomePage(CrawlerRequestRecord parent, HttpPage httpPage, List<CrawlerRequestRecord> out) {
        List<String> all = httpPage.getHtml().xpath("//div[@class=\"w1000\"]/span/a/@href").all();
        for (String url : all) {
            if (url.matches(moduleEntranceUrl)) {
                out.add(CrawlerRequestRecord.builder()
                        .turnPageRequest(parent)
                        .httpUrl(url)
                        .recordKey(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .build());
            }
        }
    }

    /** Queues the article-list ("index") pages linked from a channel home page, skipping blocked modules. */
    private void parseChannelPage(CrawlerRequestRecord parent, HttpPage httpPage,
                                  String requestUrl, List<CrawlerRequestRecord> out) {
        List<String> all = httpPage.getHtml().xpath("//div[@class=\"pd_nav w1000 white mt15\"]/a/@href|//div[@class=\"w1000 clearfix white nav_ch\"]/a/@href|//div[@class=\"navInner navInner_01\"]/ul/li//span/a/@href").all();
        for (String url : all) {
            // Relative links are resolved against the current sub-site host.
            url = url.matches(moduleArticleUrl) ? url : getString(peoplePrefix, requestUrl) + url;
            if (!url.matches(moduleArticleUrl)) {
                continue;
            }
            String[] split = url.split("/");
            // Second-to-last path segment is the channel/module id.
            if (noCrawlerModules.contains(split[split.length - 2])) {
                continue;
            }
            out.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(parent)
                    .httpUrl(url)
                    .recordKey(url)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .build());
        }
    }

    /**
     * Handles a channel article-list page: queues its "next page" link and every article item
     * whose release time can be extracted.
     */
    private void parseArticleListPage(CrawlerRequestRecord parent, HttpPage httpPage,
                                      String requestUrl, List<CrawlerRequestRecord> out) {
        // Special layout: the "world/GB/1029" index only exposes "more" links to sub-lists.
        if (requestUrl.matches("http://world.people.com.cn/GB/1029/index.html")) {
            List<String> all = httpPage.getHtml().xpath("//li[@class=\"more\"]/a/@href").all();
            for (String url : all) {
                // NOTE(review): getString may return null here, producing a "null..." URL — behavior
                // preserved from the original; confirm the downloader rejects such URLs harmlessly.
                url = getString(peoplePrefix, requestUrl) + url;
                out.add(CrawlerRequestRecord.builder()
                        .turnPageRequest(parent)
                        .httpUrl(url)
                        .recordKey(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .build());
            }
            return;
        }

        // Sub-domain (e.g. "politics") selects which page template to scrape.
        String site = requestUrl.substring(requestUrl.indexOf("//") + 2, requestUrl.indexOf("."));
        queueNextPage(parent, httpPage, requestUrl, site, out);
        queueArticleItems(parent, httpPage, requestUrl, site, out);
    }

    /** Finds the "下一页" (next page) anchor, if any, and queues it as a turn-page request. */
    private void queueNextPage(CrawlerRequestRecord parent, HttpPage httpPage,
                               String requestUrl, String site, List<CrawlerRequestRecord> out) {
        List<Selectable> nodes = new ArrayList<>();
        if (site.matches("politics|tw|military|opinion|legal|society|industry|house|edu|culture|scitech")) {
            nodes = httpPage.getHtml().xpath("//div[@class=\"page_n clearfix\"]/a").nodes();
        } else if (site.matches("world")) {
            nodes = httpPage.getHtml().xpath("//div[@class=\"ej_page\"]/a").nodes();
        } else if (site.matches("finance")) {
            nodes = httpPage.getHtml().xpath("//div[@class=\"ej_page1 center\"]/a").nodes();
        } else if (site.matches("health")) {
            nodes = httpPage.getHtml().xpath("//div[@class=\"pageNum\"]/a").nodes();
        }

        String nextPageUrl = "";
        for (Selectable node : nodes) {
            String text = node.xpath("./text()").get();
            if ("下一页".equals(text)) {
                nextPageUrl = node.xpath("./@href").get();
                break;
            }
        }

        if (StringUtils.isNotEmpty(nextPageUrl)) {
            // The href is relative; resolve it against the list page's directory.
            nextPageUrl = requestUrl.substring(0, requestUrl.lastIndexOf("/") + 1) + nextPageUrl;
            out.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(parent)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .build());
        }
    }

    /** Queues an item-page request for every article entry on the list page. */
    private void queueArticleItems(CrawlerRequestRecord parent, HttpPage httpPage,
                                   String requestUrl, String site, List<CrawlerRequestRecord> out) {
        List<Selectable> items = new ArrayList<>();
        if (site.matches("politics|military|legal|society|industry|edu|culture|scitech")) {
            items = httpPage.getHtml().xpath("//div[@class=\"ej_list_box clear\"]/ul/li").nodes();
        } else if (site.matches("world")) {
            items = httpPage.getHtml().xpath("//div[@class=\"ej_bor\"]/ul/li").nodes();
        } else if (site.matches("finance")) {
            items = httpPage.getHtml().xpath("//ul[@class=\"list_14 list_ej\"]/li").nodes();
        } else if (site.matches("tw|opinion|house")) {
            items = httpPage.getHtml().xpath("//ul[@class=\"list_14 clearfix\"]/li").nodes();
        } else if (site.matches("health")) {
            items = httpPage.getHtml().xpath("//ul[@class=\"list_02\"]//div[@class=\"newsItems\"]").nodes();
        }

        for (Selectable selectable : items) {
            String itemUrl = selectable.xpath("./a/@href").get();
            if (StringUtils.isEmpty(itemUrl)) {
                continue;
            }
            itemUrl = itemUrl.matches(articleUrl) ? itemUrl : getString(peoplePrefix, requestUrl) + itemUrl;

            String releaseTime = extractListReleaseTime(selectable, site);
            if (StringUtils.isEmpty(releaseTime)) {
                continue;
            }

            try {
                Long releaseTimeToLong = DateUtils.parseDate(releaseTime, listTimePattern(releaseTime)).getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(parent)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .resultLabelTag(article)
                        .needParsed(false)
                        .releaseTime(releaseTimeToLong)
                        .build();
                // Propagate the scheduling category tag from the parent request.
                itemRecord.addScheduleTags(parent.getTags().get(CrawlerEnum.CrawlerCategoryTag.crawler_category_schedule_tag.enumVal()));
                out.add(itemRecord);
            } catch (ParseException e) {
                log.warn("Unparseable release time '{}' for {}", releaseTime, itemUrl, e);
            }
        }
    }

    /** Extracts the raw release-time text of a list item; the markup differs per sub-site. */
    private static String extractListReleaseTime(Selectable selectable, String site) {
        String releaseTime = "";
        if (site.matches("politics|military|legal|society|industry|edu|culture|scitech")) {
            releaseTime = selectable.xpath("./em/text()").get();
        } else if (site.matches("world")) {
            releaseTime = selectable.xpath("./i/text()").get();
            releaseTime = StringUtils.isNotEmpty(releaseTime) ? getString("\\d{4}-\\d{2}-\\d{2} \\d{2}：\\d{2}", releaseTime) : "";
        } else if (site.matches("finance")) {
            releaseTime = selectable.xpath("./text()").get();
            // NOTE(review): for single-digit months/days (e.g. "3月5日") the concatenated value matches
            // none of the patterns in listTimePattern and the item is silently skipped — TODO confirm intent.
            releaseTime = LocalDate.now().getYear() + getString("\\d+月\\d+日", releaseTime);
        } else if (site.matches("tw|opinion|house")) {
            releaseTime = selectable.xpath("./i/text()").get().trim();
        } else if (site.matches("health")) {
            releaseTime = selectable.xpath(".//div[@class=\"n_time\"]").get().trim();
        }
        return releaseTime;
    }

    /** Maps a raw release-time string to its date pattern; "" when no known format matches. */
    private static String listTimePattern(String releaseTime) {
        if (releaseTime.matches("\\d{4}-\\d{2}-\\d{2}")) {
            return "yyyy-MM-dd";
        } else if (releaseTime.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}：\\d{2}")) {
            return "yyyy-MM-dd HH：mm";
        } else if (releaseTime.matches("\\d{6}月\\d{2}日")) {
            return "yyyyMM月dd日";
        } else if (releaseTime.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}")) {
            return "yyyy-MM-dd HH:mm";
        } else if (releaseTime.matches("\\d{4}年\\d{2}月\\d{2}日 \\d{2}:\\d{2}")) {
            return "yyyy年MM月dd日 HH:mm";
        }
        return "";
    }

    /**
     * Routes a fetched page to the matching wash routine based on its result tags.
     *
     * @param crawlerRecord the request record that produced the page
     * @param page          the downloaded page
     * @return washed data items (empty on non-200 or untagged pages)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        if (page.getStatusCode() == 200 && crawlerResultTags.hasDataType(article)) {
            // NOTE(review): washArticle returns null when the release time cannot be parsed,
            // so a null element may be added — preserved from the original; confirm downstream tolerates it.
            crawlerDataList.add(washArticle(crawlerRecord, page));
        }

        return crawlerDataList;
    }

    /**
     * Extracts an article (title, source, author, release time, body text) from a detail page.
     *
     * @param crawlerRequestRecord the request record for this page
     * @param httpPage             the downloaded detail page
     * @return the washed article, or {@code null} if the release time cannot be parsed
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String site = itemUrl.substring(itemUrl.indexOf("//") + 2, itemUrl.indexOf("."));

        String author = httpPage.getHtml().xpath("//p[@class=\"author\"]/text()").get();

        // Headline = kicker (pre) + main title + subtitle; any piece may be absent.
        String pre = httpPage.getHtml().xpath("//h3[@class=\"pre\"]/text()").get();
        pre = StringUtils.isEmpty(pre) ? "" : pre;
        String title = httpPage.getHtml().xpath("//h1/text()").get();
        String sub = httpPage.getHtml().xpath("//h4[@class=\"sub\"]/text()").get();
        sub = StringUtils.isNotEmpty(sub) ? sub : "";
        title = pre + title + sub;
        String source = httpPage.getHtml().xpath("//div[@class=\"box01\"]/div[@class=\"fl\"]/a/text()|//div[@class=\"fr\"]/a/text()").get();
        List<String> articleTextList = httpPage.getHtml().xpath("//div[@id=\"rwb_zw\"]/p//text()|//div[@class=\"content clear clearfix\"]/p//text()").all();
        String releaseTime = httpPage.getHtml().xpath("//div[@class=\"box01\"]/div[@class=\"fl\"]/text()|//p[@class=\"time\"]/text()|//div[@class=\"artOri\"]/text()").get();
        releaseTime = StringUtils.isNotEmpty(releaseTime) ? getString("\\d{4}年\\d{2}月\\d{2}日\\d{2}:\\d{2}", releaseTime) : "";
        if (StringUtils.isEmpty(releaseTime)) {
            // Fallback layout: the timestamp sits in a right-floated div.
            List<String> all = httpPage.getHtml().xpath("//div[@class=\"fr\"]/text()").all();
            for (String data : all) {
                data = StringUtils.isNotEmpty(data) ? getString("\\d{4}年\\d{2}月\\d{2}日\\d{2}:\\d{2}", data) : "";
                if (StringUtils.isNotEmpty(data)) {
                    releaseTime = data;
                    break;
                }
            }
        }

        try {
            // When no <h1> exists, pre + null + sub yields the literal string "null":
            // switch to the alternate "content" layout.
            if ("null".equals(title)) {
                String h5 = httpPage.getHtml().xpath("//div[@class=\"content\"]/h5/text()").get();
                h5 = StringUtils.isNotEmpty(h5) ? h5 : "";
                String h3 = httpPage.getHtml().xpath("//div[@class=\"content\"]/h3/text()").get();
                h3 = StringUtils.isNotEmpty(h3) ? h3 : "";
                String h4 = httpPage.getHtml().xpath("//div[@class=\"content\"]/h4/text()").get();
                h4 = StringUtils.isNotEmpty(h4) ? h4 : "";
                title = h5 + h3 + h4;

                source = httpPage.getHtml().xpath("//p[@class=\"time\"]/a/text()").get();

                articleTextList = httpPage.getHtml().xpath("//div[@class=\"content\"]//p[@style=\"text-indent: 2em;\"]//text()|//div[@class=\"content\"]//p[@style=\"text-align: left;\"]//text()").all();
            }
            // The health sub-site always uses its own template; override whatever was found above.
            if (site.matches("health")) {
                title = httpPage.getHtml().xpath("//h2/text()").get();
                source = httpPage.getHtml().xpath("//div[@class=\"artOri\"]/a/text()").get();
                articleTextList = httpPage.getHtml().xpath("//div[@class=\"artDet\"]/p//text()").all();
            }

            String parsePatterns = "";
            if (releaseTime.matches("\\d{4}年\\d{2}月\\d{2}日\\d{2}:\\d{2}")) {
                parsePatterns = "yyyy年MM月dd日HH:mm";
            }
            // Fall back to the request record's release time when the page shows none.
            Long releaseTimeToLong = StringUtils.isNotEmpty(releaseTime) ? DateUtils.parseDate(releaseTime, parsePatterns).getTime() : crawlerRequestRecord.getReleaseTime();

            // Join the paragraphs, stopping at the "推荐阅读" (recommended reading) footer block.
            StringBuilder conents = new StringBuilder();
            for (String paragraph : articleTextList) {
                if (paragraph.contains("推荐阅读")) {
                    break;
                }
                if (StringUtils.isNotEmpty(paragraph)) {
                    conents.append(paragraph).append(" ");
                }
            }

            // Article id: the digits between the last '-' and the ".html" suffix.
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf("-") + 1, itemUrl.lastIndexOf("."));

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_Content, conents.toString().trim())
                    .addContentKV(Field_Title, title.trim())
                    .addContentKV(Field_Source, source)
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            log.error("Unparseable release time '{}' for {}", releaseTime, itemUrl, e);
        }
        return null;
    }


    /** No post-crawl bookkeeping is needed for this site. */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // intentionally empty
    }

    /**
     * Returns the first substring of {@code input} matching {@code regx}, or {@code null} when none.
     *
     * @param regx  the regular expression to search for
     * @param input the text to search in
     * @return the first match, or {@code null}
     */
    private static String getString(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group() : null;
    }

}
