package com.chance.cc.crawler.development.scripts.eastday;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * Crawler script for Eastday (东方网, eastday.com): discovers article links from
 * the portal navigation, JSON list APIs, and HTML list pages, then washes
 * article pages into structured crawler data.
 *
 * @Author Zhao.Hhuan
 * @Date Create in 2020/11/24 16:12
 * @Description
 *      东方网 (Eastday)
 **/
public class EastdayCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(EastdayCrawlerScript.class);

    public static final String entranceUrl = "http://eastday.com/";// Eastday portal entry url
    public static final String listUrl = "https://apin.eastday.com/api/news/List";// channel news list API (POST)
    public static final String specialNewsListUrl= "https://apin.eastday.com/api/Special/SpecialNewsList";// special-topic list API (POST)
    public static final String specialListUrl = "https://apin.eastday.com/api/Special/SpecialNewsList";// NOTE: identical value to specialNewsListUrl; public constant kept for backward compatibility
    public static final String articleUrl = "https://n.eastday.com/(news|video)/\\d+";// article url pattern
    public static final String prefixUrl = "http://news.eastday.com/gd2008";
    public static final String newsEntranceUrl = "http://news.eastday.com/gd2008/news/index\\d*.html";// news channel entry page pattern
    public static final String cityPrefixUrl = "http://city.eastday.com";
    public static final String cityEntranceUrl = "http://city.eastday.com/eastday/city/2016city/jz/index\\d*.html";// city channel entry page pattern
    public static final String cityArticleUrl = "http://city.eastday.com/gk/\\S*/u1a\\S*.html";// city article url pattern

    /** appId sent in every list-API request body. */
    private static final String APP_ID = "190503";
    /** Release-time format used by the list APIs and the news channel pages. */
    private static final String LIST_TIME_FORMAT = "yyyy-MM-dd HH:mm:ss";
    /** Release-time format used by city article pages. */
    private static final String CITY_TIME_FORMAT = "yyyy/MM/dd HH:mm:ss";
    /** Special-topic ids for the regional ("区情") sections on the portal. */
    private static final List<String> LOCAL_SPECIAL_IDS = Arrays.asList(
            "1821891", "1821894", "1821911", "1821432", "1821927", "1821929", "1821932", "1821936", "1822016",
            "1822604", "1822605", "1822610", "1822614", "1822628", "1822632", "1822633");

    /**
     * Script domain identifier.
     *
     * @return the fixed domain key "eastday"
     */
    @Override
    public String domain() {
        return "eastday";
    }

    /**
     * Url patterns that route requests into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(entranceUrl);
        addUrlRegular(listUrl);
        addUrlRegular(specialNewsListUrl);
        addUrlRegular(specialListUrl);// same pattern as specialNewsListUrl; harmless duplicate kept for compatibility
        addUrlRegular(articleUrl);
        addUrlRegular(newsEntranceUrl);
        addUrlRegular(cityEntranceUrl);
        addUrlRegular(cityArticleUrl);
    }

    /**
     * Input check before the script runs; this script accepts every record.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return always {@code true}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }

    /**
     * Parse follow-up links from a downloaded page, dispatching on the request
     * url: portal entry, JSON list API response, news channel page, or city
     * channel page.
     *
     * @param crawlerRequestRecord the record that produced {@code httpPage}
     * @param httpPage             the downloaded page
     * @return turn-page and item-page records discovered on the page
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();

        if (requestUrl.matches(entranceUrl)) {
            parseEntrance(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(listUrl) || requestUrl.matches(specialListUrl)) {
            parseApiList(crawlerRequestRecord, httpPage, requestUrl, parsedLinks);
        } else if (requestUrl.matches(newsEntranceUrl)) {
            parseNewsEntrance(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(cityEntranceUrl)) {
            parseCityEntrance(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Portal entry page: map each channel link in the top navigation to its
     * JSON list API, then add the fixed regional and society list APIs.
     * Each request gets its OWN parameter map (the original shared one map
     * across iterations, leaking stale keys into later request bodies).
     */
    private void parseEntrance(CrawlerRequestRecord parent, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        List<String> nodes = httpPage.getHtml().xpath("//div[@id=\"dh1\"]/div/p//@href").all();
        for (String itemUrl : nodes) {
            if (!itemUrl.matches("http\\S*://(finance|world|china|city|sh|pinglun|panorama).eastday.com/|https://mil.021east.com/")) {
                continue;
            }
            // Channel key is the first host label, e.g. "finance" from finance.eastday.com.
            String key = itemUrl.substring(itemUrl.indexOf("//") + 2, itemUrl.indexOf("."));
            String channelId = "";
            String specialId = "";
            switch (key) {
                case "finance": channelId = "1"; break;
                case "world": channelId = "2"; break;
                case "china": channelId = "10"; break;
                case "sh": channelId = "15"; break;
                case "mil": channelId = "5"; break;
                case "city": specialId = "1821420"; break;
                case "pinglun": specialId = "1689310"; break;
                case "panorama": specialId = "891390"; break;
                default: break;
            }
            if (StringUtils.isNotEmpty(channelId)) {
                parsedLinks.add(buildPostRecord(parent, listUrl, channelListParams(channelId)));
            } else if (StringUtils.isNotEmpty(specialId)) {
                parsedLinks.add(buildPostRecord(parent, specialNewsListUrl, specialListParams(specialId)));
            }
        }

        // Regional ("区情") special-topic APIs.
        for (String local : LOCAL_SPECIAL_IDS) {
            parsedLinks.add(buildPostRecord(parent, specialNewsListUrl, specialListParams(local)));
        }

        // Society channel API.
        parsedLinks.add(buildPostRecord(parent, listUrl, channelListParams("8")));
    }

    /** First-page request body for the channel news list API ({@link #listUrl}). */
    private Map<String, Object> channelListParams(String channelId) {
        Map<String, Object> params = new HashMap<>();
        params.put("appId", APP_ID);
        params.put("channelId", channelId);
        params.put("currentPage", 1);
        params.put("newsType", 1);
        params.put("pageSize", 15);
        params.put("version", "1.0");
        return params;
    }

    /** First-page request body for the special-topic list API ({@link #specialNewsListUrl}). */
    private Map<String, Object> specialListParams(String specialId) {
        Map<String, Object> params = new HashMap<>();
        params.put("appId", APP_ID);
        params.put("limitCount", 20);
        params.put("skipCount", 0);
        params.put("specialId", specialId);
        params.put("version", "1.0");
        return params;
    }

    /**
     * Build a POST list-API request record with a JSON body. The record key is
     * the url only (bodies differ per channel), so {@code notFilterRecord()} is
     * used to bypass key-based de-duplication.
     */
    private CrawlerRequestRecord buildPostRecord(CrawlerRequestRecord parent, String url, Map<String, Object> params) {
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .turnPageRequest(parent)
                .httpUrl(url)
                .recordKey(url)
                .notFilterRecord()
                .releaseTime(System.currentTimeMillis())
                .build();
        HttpRequest httpRequest = record.getHttpRequest();
        httpRequest.setMethod("POST");
        httpRequest.setRequestBody(HttpRequestBody.json(JSONObject.toJSONString(params), "UTF-8"));
        return record;
    }

    /**
     * JSON list API response: schedule the next page (by advancing the paging
     * field of the current request body) and the article items.
     */
    private void parseApiList(CrawlerRequestRecord parent, HttpPage httpPage, String requestUrl, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequestBody requestBody = httpPage.getRequest().getRequestBody();
        // Body was serialized as UTF-8 JSON; decode explicitly instead of with the platform charset.
        JSONObject requestBodyJson = JSONObject.parseObject(new String(requestBody.getBody(), StandardCharsets.UTF_8));
        String modelKey;
        Integer currentPage;
        if (requestUrl.matches(listUrl)) {
            // Channel API pages by currentPage.
            currentPage = (Integer) requestBodyJson.get("currentPage");
            requestBodyJson.replace("currentPage", currentPage, currentPage + 1);
            modelKey = (String) requestBodyJson.get("channelId");
        } else {
            // Special-topic API pages by skipCount += limitCount.
            currentPage = (Integer) requestBodyJson.get("skipCount");
            requestBodyJson.replace("skipCount", currentPage, currentPage + (Integer) requestBodyJson.get("limitCount"));
            modelKey = (String) requestBodyJson.get("specialId");
        }

        CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                .turnPageRequest(parent)
                .recordKey(requestUrl + modelKey + currentPage)
                .httpUrl(requestUrl)
                .copyBizTags()
                .releaseTime(System.currentTimeMillis())
                .build();
        HttpRequest httpRequest = turnPageRequest.getHttpRequest();
        httpRequest.setMethod("POST");
        httpRequest.setRequestBody(HttpRequestBody.json(requestBodyJson.toJSONString(), "UTF-8"));
        parsedLinks.add(turnPageRequest);

        // Article items: each entry needs both a url and a release time.
        List<String> dataList = httpPage.getJson().jsonPath($_type + ".list").all();
        for (String data : dataList) {
            Json dataJson = new Json(data);
            String itemUrl = dataJson.jsonPath($_type + ".url").get();
            if (StringUtils.isEmpty(itemUrl)) {
                continue;
            }
            String releaseTime = dataJson.jsonPath($_type + ".time").get();
            if (StringUtils.isEmpty(releaseTime)) {
                continue;
            }
            addArticleRecord(parent, parsedLinks, itemUrl, releaseTime, LIST_TIME_FORMAT);
        }
    }

    /**
     * HTML news channel entry page ({@link #newsEntranceUrl}): next-page link
     * plus the article list items.
     */
    private void parseNewsEntrance(CrawlerRequestRecord parent, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String nextPageUrl = httpPage.getHtml().xpath("//a[contains(text(),'下页')]/@href").get();
        if (StringUtils.isNotEmpty(nextPageUrl)) {
            // Links are relative ("../news/index2.html"); strip ".." and prepend the channel prefix.
            nextPageUrl = prefixUrl + nextPageUrl.replace("..", "");
            parsedLinks.add(buildTurnPageRecord(parent, nextPageUrl, true));
        }

        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"leftsection\"]/ul/li").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./a[@class=\"blue14\"]/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }
            String releaseTime = node.xpath("./span/text()").get();
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }
            addArticleRecord(parent, parsedLinks, itemUrl, releaseTime, LIST_TIME_FORMAT);
        }
    }

    /**
     * HTML city channel entry page ({@link #cityEntranceUrl}): next-page link
     * plus article list items. List items carry no timestamp here; the release
     * time is extracted later in {@link #washCityArticle}.
     */
    private void parseCityEntrance(CrawlerRequestRecord parent, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String nextPageUrl = httpPage.getHtml().xpath("//p[@class=\"nextpage\"]/a/@href").get();
        if (StringUtils.isNotEmpty(nextPageUrl)) {
            parsedLinks.add(buildTurnPageRecord(parent, cityPrefixUrl + nextPageUrl, false));
        }

        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"listbox\"]/ul/li").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }
            itemUrl = cityPrefixUrl + itemUrl;
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .itemPageRequest(parent)
                    .recordKey(itemUrl)
                    .httpUrl(itemUrl)
                    .copyBizTags()
                    .needParsed(false)
                    .resultLabelTag(article)
                    .build());
        }
    }

    /**
     * Build a turn-page record for an HTML list page.
     *
     * @param stampReleaseTime whether to stamp the current time as releaseTime
     *                         (the city channel historically omitted it)
     */
    private CrawlerRequestRecord buildTurnPageRecord(CrawlerRequestRecord parent, String url, boolean stampReleaseTime) {
        if (stampReleaseTime) {
            return CrawlerRequestRecord.builder()
                    .turnPageRequest(parent)
                    .recordKey(url)
                    .httpUrl(url)
                    .copyBizTags()
                    .releaseTime(System.currentTimeMillis())
                    .build();
        }
        return CrawlerRequestRecord.builder()
                .turnPageRequest(parent)
                .recordKey(url)
                .httpUrl(url)
                .copyBizTags()
                .build();
    }

    /**
     * Parse a release-time string and add an item-page record for the article;
     * a malformed timestamp skips the single item (logged) instead of aborting
     * the whole page.
     */
    private void addArticleRecord(CrawlerRequestRecord parent, List<CrawlerRequestRecord> parsedLinks,
                                  String itemUrl, String releaseTime, String timeFormat) {
        try {
            long releaseTimeMillis = DateUtils.parseDate(releaseTime, timeFormat).getTime();
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .itemPageRequest(parent)
                    .recordKey(itemUrl)
                    .httpUrl(itemUrl)
                    .releaseTime(releaseTimeMillis)
                    .copyBizTags()
                    .needParsed(false)
                    .resultLabelTag(article)
                    .build());
        } catch (ParseException e) {
            log.error("failed to parse release time: url={}, time={}", itemUrl, releaseTime, e);
        }
    }

    /**
     * Route a downloaded article page to the matching wash method.
     *
     * @param crawlerRecord the record that produced {@code page}
     * @param page          the downloaded page
     * @return zero or one washed data records (never containing null)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        int statusCode = page.getStatusCode();
        String url = page.getRequest().getUrl();
        if (statusCode != 200) {
            log.error("页面(“{}”)出现错误，错误代码：{}", url, statusCode);
            return crawlerDataList;
        }

        if (url.matches(articleUrl) && crawlerResultTags.hasDataType(article)) {
            crawlerDataList.add(washArticle(crawlerRecord, page));
        } else if (url.matches(cityArticleUrl) && crawlerResultTags.hasDataType(article)) {
            // washCityArticle returns null on an unparseable release time; never emit null records.
            CrawlerData cityData = washCityArticle(crawlerRecord, page);
            if (cityData != null) {
                crawlerDataList.add(cityData);
            }
        }
        return crawlerDataList;
    }

    /**
     * Wash an n.eastday.com article page (title, author, source, body text)
     * into a CrawlerData record. Release time comes from the request record.
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String title = httpPage.getHtml().xpath("//div[@class=\"article\"]/h1/text()").get();

        // Author span reads like "作者：XXX"; keep only the part after the colon.
        String author = httpPage.getHtml().xpath("//span[@class=\"author\"]").get();
        if (StringUtils.isNotEmpty(author) && author.contains("作者：")) {
            author = author.substring(author.indexOf("：") + 1);
        } else {
            author = "";
        }

        String source = httpPage.getHtml().xpath("//span[@class=\"source\"]/a").get();
        String content = joinText(httpPage.getHtml().xpath("//div[@class=\"detail\"]//text()").all());

        String itemUrl = httpPage.getRequest().getUrl();
        // Article id is the trailing path segment, e.g. ".../news/12345" -> "12345".
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .resultLabelTag(article)
                .url(itemUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_Content, content)
                .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(title))
                .addContentKV(Field_Source, source)
                .addContentKV(Field_Author, author)
                .build();
    }

    /**
     * Wash a city.eastday.com article page. Author, source and release time are
     * extracted from the info spans by their Chinese labels / timestamp shape.
     *
     * @return the washed record, or {@code null} when the release time cannot
     *         be parsed (callers must skip null results)
     */
    public CrawlerData washCityArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String title = httpPage.getHtml().xpath("//p[@class=\"title\"]/text()").get();

        List<Selectable> info = httpPage.getHtml().xpath("//p[@class=\"info\"]/span").nodes();
        String author = "";
        String source = "";
        String releaseTime = "";
        for (Selectable selectable : info) {
            String data = selectable.xpath("./text()").get();
            if (StringUtils.isEmpty(data)) {
                continue;
            }
            if (data.contains("来源：")) {
                // Prefer the anchor text when the source is a link.
                String linkedSource = selectable.xpath("./a/text()").get();
                source = StringUtils.isNotEmpty(linkedSource) ? linkedSource : data.substring(data.indexOf("：") + 1);
            } else if (data.contains("作者：")) {
                author = data.substring(data.indexOf("：") + 1);
            } else if (data.matches("\\d{4}/\\d+/\\d+ \\d+:\\d+:\\d+")) {
                releaseTime = data;
            }
        }

        String content = joinText(httpPage.getHtml().xpath("//div[@class=\"nr\"]//text()").all());

        String itemUrl = httpPage.getRequest().getUrl();
        // Article id is the file name without extension, e.g. ".../u1a123.html" -> "u1a123".
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        try {
            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, CITY_TIME_FORMAT).getTime())
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(title))
                    .addContentKV(Field_Source, source)
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            log.error("时间格式错误: url={}, releaseTime={}", itemUrl, releaseTime, e);
            return null;
        }
    }

    /** Concatenate text nodes (HTML-unescaped) with single spaces, trimmed. */
    private String joinText(List<String> textNodes) {
        StringBuilder contents = new StringBuilder();
        for (String text : textNodes) {
            contents.append(StringEscapeUtils.unescapeHtml4(text)).append(" ");
        }
        return contents.toString().trim();
    }

    /** No post-execution work is needed for this script. */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // intentionally empty
    }

}
