package com.chance.cc.crawler.development.scripts.hangzhou;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.proxy.Proxy;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2020/12/1 14:40
 * @Description
 *      杭州网
 **/
public class HangzhouCrawlerScript extends CrawlerCommonScript {

    // Loggers are thread-safe singletons; one static instance per class (the
    // original allocated a new logger per script instance).
    private static final Logger log = LoggerFactory.getLogger(HangzhouCrawlerScript.class);

    /** Site home page. */
    public static final String hangzhouEntranceUrl = "https://www.hangzhou.com.cn/";
    /** Channel entrance pages on the news / hznews / jrsh sub-domains. */
    public static final String moduleEntranceUrl = "http\\S*://(news|hznews).hangzhou.com.cn/|http\\S*://(jrsh).hangzhou.com.cn";
    /** First page of an article list inside a channel. */
    public static final String moduleArticleUrl = "http\\S*://\\S+.hangzhou.com.cn/\\S+/index.htm";
    /** Follow-up pages (index_2.htm, index_3.htm, ...) of an article list. */
    public static final String moduleArticleNextUrl = "http\\S*://\\S+.hangzhou.com.cn/\\S*/index_\\d+.htm";
    /** Article detail page. */
    public static final String articleUrl = "http\\S*://\\S+.hangzhou.com.cn/\\S*/content/\\d{4}-\\d{2}/\\d{2}/content_[0-9_]*.(htm|html)";

    /** hznews channels that are deliberately not crawled. */
    public static final List<String> noModulesList = Arrays.asList("xinzheng");

    /**
     * 脚本domain定义 — domain key identifying this script.
     *
     * @return the fixed domain name "hangzhou"
     */
    @Override
    public String domain() {
        return "hangzhou";
    }

    /**
     * 进入脚本的正则列表 — registers every URL pattern this script handles.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(hangzhouEntranceUrl);
        addUrlRegular(moduleEntranceUrl);
        addUrlRegular(moduleArticleUrl);
        addUrlRegular(moduleArticleNextUrl);
        addUrlRegular(articleUrl);
    }

    /**
     * Input-record sanity check; every record matching the URL regulars is accepted.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return always {@code true}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }

    /**
     * Builds the HTTP proxy attached to every request issued by this script.
     * NOTE(review): credentials are hard-coded in source; they should be moved
     * to external configuration / a secrets store.
     *
     * @return a freshly configured {@link Proxy}
     */
    private static Proxy newProxy() {
        Proxy proxy = new Proxy();
        proxy.setHost("http-dyn.abuyun.com");
        proxy.setPort(9020);
        proxy.setUsername("HL89Q19E86E2987D");
        proxy.setPassword("71F33D94CE5F7BF2");
        return proxy;
    }

    /**
     * Extracts follow-up links from a downloaded page. Four page kinds are
     * handled: home page (→ channel entrances), channel entrance (→ article
     * lists), article list (→ pagination + article details), and article
     * detail (→ internal downloads of its remaining pages).
     *
     * @param crawlerRequestRecord the record that produced {@code httpPage}
     * @param httpPage             the downloaded page
     * @return the parsed follow-up requests (possibly empty, never null)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        Proxy proxy = newProxy();

        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        String requestUrl = httpPage.getRequest().getUrl();
        // Sub-domain part of the URL, e.g. "news" from "https://news.hangzhou.com.cn/...".
        String webSite = requestUrl.substring(requestUrl.indexOf("//") + 2, requestUrl.indexOf("."));

        if (requestUrl.matches(hangzhouEntranceUrl)) {
            // Home page: collect channel entrances.
            List<String> all = httpPage.getHtml().xpath("//div[@class=\"content\"]//a/@href").all();
            for (String url : all) {
                if (url.matches(moduleEntranceUrl)) {
                    parsedLinks.add(CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(url)
                            .recordKey(url)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .proxy(proxy)
                            .build());
                }
            }
        } else if (requestUrl.matches(moduleEntranceUrl)) {
            // Channel entrance: collect article-list pages; markup differs per sub-domain.
            List<String> all = new LinkedList<>();
            if (webSite.matches("news|hznews")) {
                all = httpPage.getHtml().xpath("//div[@class=\"main\"]/a/@href").all();
            } else if (webSite.matches("jrsh")) {
                all = httpPage.getHtml().xpath("//a[@onmouseover]/@href").all();
            }
            for (String url : all) {
                if (!url.matches(moduleArticleUrl)) {
                    continue;
                }
                // For hznews the second-to-last path segment names the channel;
                // channels listed in noModulesList are skipped.
                String key = "";
                if (webSite.matches("hznews")) {
                    String[] split = url.split("/");
                    key = split[split.length - 2];
                }
                if (webSite.matches("news|jrsh") || (webSite.matches("hznews") && !noModulesList.contains(key))) {
                    parsedLinks.add(CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .httpUrl(url)
                            .recordKey(url)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .proxy(proxy)
                            .build());
                }
            }
        } else if (requestUrl.matches(moduleArticleUrl) || requestUrl.matches(moduleArticleNextUrl)) {
            // 下一页地址解析 — follow the pagination link, if present.
            String nextPageUrl = httpPage.getHtml().xpath("//li[@class=\"page-next\"]/a/@href").get();
            if (StringUtils.isNotEmpty(nextPageUrl)) {
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .recordKey(nextPageUrl)
                        .httpUrl(nextPageUrl)
                        .needWashed(false)
                        .releaseTime(System.currentTimeMillis())
                        .proxy(proxy)
                        .build());
            }

            // 文章列表解析 — article entries; node layout differs per sub-domain.
            List<Selectable> nodes = new LinkedList<>();
            if (webSite.matches("hznews|news")) {
                nodes = httpPage.getHtml().xpath("//ul[@class=\"list\"]/li").nodes();
            } else if (webSite.matches("jrsh")) {
                nodes = httpPage.getHtml().xpath("//td[@class=\"Dlife_ccolor02\"]").nodes();
            }
            for (Selectable node : nodes) {
                String itemUrl = node.xpath("./a/@href").get();
                if (StringUtils.isEmpty(itemUrl)) {
                    continue;
                }

                String releaseTime = "";
                if (webSite.matches("hznews|news")) {
                    releaseTime = node.xpath("./span/text()").get();
                } else if (webSite.matches("jrsh")) {
                    // jrsh buries the timestamp inside free text nodes; take the first match.
                    for (String data : node.xpath("./text()").all()) {
                        data = StringUtils.isNotEmpty(data) ? getString("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}", data) : "";
                        if (StringUtils.isNotEmpty(data)) {
                            releaseTime = data;
                            break;
                        }
                    }
                }
                if (StringUtils.isEmpty(releaseTime)) {
                    continue;
                }

                // Choose a parse pattern matching the extracted timestamp shape.
                String parsePattern = "";
                if (releaseTime.matches("\\d{4}-\\d{2}-\\d{2}")) {
                    parsePattern = "yyyy-MM-dd";
                } else if (releaseTime.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}")) {
                    parsePattern = "yyyy-MM-dd HH:mm";
                }
                try {
                    long releaseTimeToLong = DateUtils.parseDate(releaseTime, parsePattern).getTime();
                    parsedLinks.add(CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(itemUrl)
                            .httpUrl(itemUrl)
                            .proxy(proxy)
                            .releaseTime(releaseTimeToLong)
                            .resultLabelTag(article)
                            .build());
                } catch (ParseException e) {
                    // Was e.printStackTrace(); route through the logger so failures are traceable.
                    log.warn("unparseable release time '{}' on {}", releaseTime, requestUrl, e);
                }
            }
        } else if (requestUrl.matches(articleUrl)) {
            // Article detail: queue its remaining pages (numeric pagination links)
            // as internal downloads so their body text can be merged later.
            List<Selectable> nodes = httpPage.getHtml().xpath("//ul[@class=\"pages\"]//a").nodes();
            if (nodes != null) {
                for (Selectable node : nodes) {
                    String text = node.xpath("./text()").get();
                    if (text.matches("\\d+")) {
                        String itemUrl = node.xpath("./@href").get();
                        parsedLinks.add(CrawlerRequestRecord.builder()
                                .itemPageRequest(crawlerRequestRecord)
                                .recordKey(itemUrl)
                                .httpUrl(itemUrl)
                                .releaseTime(crawlerRequestRecord.getReleaseTime())
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                                .build());
                    }
                }
            }
        }

        return parsedLinks;
    }

    /**
     * Merges the body text of internally downloaded follow-up pages of a
     * multi-page article into the main record's extras under the key "content".
     *
     * @param crawlerRecord           the main article record
     * @param internalDownloadRecords the downloaded follow-up pages
     * @param links                   unused here
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        // StringBuilder: no cross-thread sharing here, so StringBuffer's locking is waste.
        StringBuilder contents = new StringBuilder();
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            List<String> articleTextList = internalDownloadRecord.getInternalDownloadPage().getHtml()
                    .xpath("//td[@class=\"Dlife_bcolor02\"]//p//text()|//div[contains(@class,'zhengwen')]//p//text()").all();
            for (String articleText : articleTextList) {
                if (StringUtils.isNotEmpty(articleText)) {
                    contents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
                }
            }
        }

        Map<String, Object> extras = new HashMap<>();
        // Store an immutable String; the original stored the mutable StringBuffer itself.
        extras.put("content", contents.toString());
        crawlerRecord.getHttpRequest().setExtras(extras);
    }

    /**
     * Dispatches a successfully downloaded page to the wash routine matching
     * its result tag.
     *
     * @param crawlerRecord the record that produced {@code page}
     * @param page          the downloaded page
     * @return cleaned data items; empty on download error or wash failure
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        int statusCode = page.getStatusCode();
        if (statusCode == 200) {
            if (crawlerResultTags.hasDataType(article)) {
                CrawlerData washed = washArticle(crawlerRecord, page);
                // washArticle returns null on parse failure; the original added
                // that null straight into the result list.
                if (washed != null) {
                    crawlerDataList.add(washed);
                }
            }
        } else {
            log.info("该文章(“{}”)下载错误，错误码：{}", page.getRequest().getUrl(), statusCode);
        }

        return crawlerDataList;
    }

    /**
     * Cleans an article detail page into a {@link CrawlerData} record.
     *
     * @param crawlerRequestRecord the record that produced {@code httpPage}
     * @param httpPage             the downloaded article page
     * @return the cleaned article, or {@code null} when the release time is
     *         missing or unparseable
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String requestUrl = httpPage.getRequest().getUrl();
        String webSite = requestUrl.substring(requestUrl.indexOf("//") + 2, requestUrl.indexOf("."));

        String title = "";
        String source = "";
        String releaseTime = "";
        List<String> articleTextList = new LinkedList<>();
        if (webSite.matches("news|hznews")) {
            title = httpPage.getHtml().xpath("//div[@class=\"tit\"]//text()").get();
            source = httpPage.getHtml().xpath("//div[@class=\"tit1\"]/a/text()").get();
            releaseTime = httpPage.getHtml().xpath("//div[@class=\"tit1\"]/text()").get();
            articleTextList = httpPage.getHtml().xpath("//div[contains(@class,'zhengwen')]//p//text()").all();
        } else if (webSite.matches("jrsh")) {
            title = httpPage.getHtml().xpath("//td[@class=\"Dlife_bcolor\"]/text()").get();
            source = httpPage.getHtml().xpath("//td[@height=\"25\"]/a").get();
            releaseTime = httpPage.getHtml().xpath("//td[@height=\"25\"]/text()").get();
            articleTextList = httpPage.getHtml().xpath("//td[@class=\"Dlife_bcolor02\"]//p//text()").all();
        }

        // The xpath may yield null and getString may return null; the original
        // then called releaseTime.matches(...) on that null -> NPE. Guard both.
        releaseTime = getString("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}[:0-9]*", StringUtils.defaultString(releaseTime));
        if (StringUtils.isEmpty(releaseTime)) {
            log.warn("no release time found on {}", requestUrl);
            return null;
        }

        String parsePattern;
        if (releaseTime.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}")) {
            parsePattern = "yyyy-MM-dd HH:mm";
        } else if (releaseTime.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}")) {
            parsePattern = "yyyy-MM-dd HH:mm:ss";
        } else {
            // The original fell through with an empty pattern string and handed
            // it to DateUtils.parseDate; bail out cleanly instead.
            log.warn("unrecognized release time format '{}' on {}", releaseTime, requestUrl);
            return null;
        }

        StringBuilder contents = new StringBuilder();
        for (String articleText : articleTextList) {
            if (StringUtils.isNotEmpty(articleText)) {
                contents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
            }
        }

        // Append body text of follow-up pages collected in afterInternalDownload.
        // Guarded: the original appended extras.get("content") unconditionally,
        // producing a literal "null" suffix when the key was absent.
        Map<String, Object> extras = httpPage.getRequest().getExtras();
        if (extras != null) {
            Object extraContent = extras.get("content");
            if (extraContent != null) {
                contents.append(extraContent);
            }
        }

        String itemUrl = httpPage.getRequest().getUrl();
        // Numeric article id: ".../content_123456.htm" -> "123456".
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));

        CrawlerData crawlerData = null;
        try {
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    // Use the statically imported constant instead of the
                    // stringly-typed valueOf("article") lookup.
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, parsePattern).getTime())
                    .addContentKV(Field_Content, contents.toString().trim())
                    // defaultString: unescapeHtml4(null) returns null, and the
                    // original then NPE'd on .trim() when the title xpath missed.
                    .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(StringUtils.defaultString(title)).trim())
                    .addContentKV(Field_Source, source)
                    .build();
        } catch (ParseException e) {
            log.error("时间格式错误: '{}' on {}", releaseTime, requestUrl, e);
        }

        return crawlerData;
    }

    /** No post-processing is needed for this site. */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // intentionally empty
    }

    /**
     * 获取指定格式的字符串 — returns the first substring of {@code input}
     * matching {@code regx}, or {@code null} when nothing matches.
     *
     * @param regx  regular expression to search for
     * @param input text to search; must not be null
     * @return the first match, or {@code null}
     */
    private static String getString(String regx, String input) {
        // The original accumulated into an unused list and used while as an if.
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group() : null;
    }

    /**
     * 获取时间列表 — returns every substring of {@code input} matching
     * {@code regx}.
     *
     * @param regx  regular expression to search for
     * @param input text to search; must not be null
     * @return all matches, possibly empty (never null)
     */
    private static List<String> getTimeList(String regx, String input) {
        List<String> timeList = new ArrayList<>();
        Matcher matcher = Pattern.compile(regx).matcher(input);
        while (matcher.find()) {
            timeList.add(matcher.group());
        }
        return timeList;
    }

}
