package com.chance.cc.crawler.development.scripts.jfadaily;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpConstant;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2020/12/3 13:19
 * @Description 上观网
 **/
public class JfdailyCrawlerScript extends CrawlerCommonScript {

    // SLF4J logger; static final since it is per-class state, not per-instance.
    private static final Logger log = LoggerFactory.getLogger(JfdailyCrawlerScript.class);

    // NOTE: constant names spell "jfadaily" (extra 'a'); kept as-is because they are public.
    /** Site prefix used to absolutize relative hrefs. */
    public static final String jfadailyPrefix = "https://www.jfdaily.com";
    /** Home-page entrance URL (also used as a regex with String.matches). */
    public static final String jfadailyEntranceUrl = "https://www.jfdaily.com/home";
    /** Regex matching section list pages: /news/list?section=... */
    public static final String listEntranceUrl = "https://www.jfdaily.com/news/list\\?section=\\S*";
    /** Regex matching sub-section list pages: /news/sublist?section=... */
    public static final String subListEntranceUrl = "https://www.jfdaily.com/news/sublist\\?section=\\S*";
    /** Regex matching article detail pages: /news/detail?id=<numeric id>. */
    public static final String articleUrl = "https://www.jfdaily.com/news/detail\\?id=\\d+";
    /** Comment-list AJAX endpoint (requested via POST; contains no regex metacharacters). */
    public static final String commentUrl = "https://www.jfdaily.com/news/getLatestReplyList";

    /**
     * Unique domain identifier for this crawler script.
     *
     * @return the fixed domain key {@code "jfdaily"}
     */
    @Override
    public String domain() {
        final String domainKey = "jfdaily";
        return domainKey;
    }

    /**
     * Registers the URL patterns that route pages into this script.
     * Registration order mirrors the crawl flow: home → list → sublist → article → comment.
     */
    @Override
    public void initUrlRegulars() {
        final String[] regulars = {
                jfadailyEntranceUrl,
                listEntranceUrl,
                subListEntranceUrl,
                articleUrl,
                commentUrl
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input-data gate: decides whether this script should execute for the given record
     * (only qualifying records enter the script).
     * <p>
     * Always returns {@code false}, so the script is effectively disabled at this gate.
     * NOTE(review): confirm this is an intentional kill switch rather than an unfinished stub.
     *
     * @param crawlerRequestRecord the incoming request record (currently unused)
     * @return {@code false} always
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return false;
    }


    /**
     * Parses follow-up request links out of a downloaded page.
     * <p>
     * Routing by request URL:
     * <ul>
     *   <li>home page → section list / sublist URLs</li>
     *   <li>(sub)list page → next-page URL, article detail URLs, and one POST request
     *       for each article's first comment page</li>
     *   <li>comment endpoint → the next comment page, scheduled only while pages are full</li>
     * </ul>
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page (HTML, or JSON for the comment endpoint)
     * @return newly discovered request records; empty when nothing was found or the download failed
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        String requestUrl = httpPage.getRequest().getUrl();
        int statusCode = httpPage.getStatusCode();

        if (statusCode == 200) {
            if (requestUrl.matches(jfadailyEntranceUrl)) {
                // Home page: one entry per channel section.
                List<Selectable> nodes = httpPage.getHtml().xpath("//li[@class=\"section-li\"]").nodes();
                for (Selectable node : nodes) {
                    String listUrl = jfadailyPrefix + node.xpath("./a/@href").get();
                    if (listUrl.matches(listEntranceUrl)) {
                        CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                                .turnPageRequest(crawlerRequestRecord)
                                .httpUrl(listUrl)
                                .recordKey(listUrl)
                                .releaseTime(System.currentTimeMillis())
                                .notFilterRecord()
                                .build();

                        parsedLinks.add(itemRecord);
                    }

                    // Sub-channel (sublist) links nested under the section.
                    List<String> subHrefs = node.xpath("./ul/li/a/@href").all();
                    for (String subHref : subHrefs) {
                        String sublistUrl = jfadailyPrefix + subHref;
                        if (sublistUrl.matches(subListEntranceUrl)) {
                            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                                    .turnPageRequest(crawlerRequestRecord)
                                    .httpUrl(sublistUrl)
                                    .recordKey(sublistUrl)
                                    .releaseTime(System.currentTimeMillis())
                                    .notFilterRecord()
                                    .build();

                            parsedLinks.add(itemRecord);
                        }
                    }
                }
            } else if (requestUrl.matches(listEntranceUrl) || requestUrl.matches(subListEntranceUrl)) {
                // Next-page link of the (sub)list.
                String nextPageUrl = StringEscapeUtils.unescapeHtml4(httpPage.getHtml().xpath("//a[@class=\"page-nav-next\"]/@href").get());
                if (StringUtils.isNotEmpty(nextPageUrl)) {
                    nextPageUrl = jfadailyPrefix + nextPageUrl;

                    CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(crawlerRequestRecord)
                            .recordKey(nextPageUrl)
                            .httpUrl(nextPageUrl)
                            .needWashed(false)
                            .releaseTime(System.currentTimeMillis())
                            .build();

                    parsedLinks.add(nextPageRecord);
                }

                // Article entries on the list page.
                List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"chengshi\"]").nodes();
                for (Selectable node : nodes) {
                    String itemUrl = node.xpath(".//div[@class=\"chengshi_wz_h\"]/a/@href").get();
                    if (StringUtils.isEmpty(itemUrl)) {
                        continue;
                    }
                    itemUrl = jfadailyPrefix + itemUrl;

                    // Release time: first "yyyy-MM-dd HH:mm" found among the footer text nodes.
                    List<String> releaseTimeList = node.xpath(".//div[@class=\"chengshi_wz_f\"]/text()").all();
                    String releaseTime = "";
                    for (String s : releaseTimeList) {
                        releaseTime = StringUtils.isNotEmpty(s) ? getString("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}", s) : "";
                        if (StringUtils.isNotEmpty(releaseTime)) {
                            break;
                        }
                    }

                    // Entries without a parsable release time are skipped entirely.
                    if (StringUtils.isEmpty(releaseTime)) {
                        continue;
                    }

                    try {
                        long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd HH:mm").getTime();
                        CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                                .itemPageRequest(crawlerRequestRecord)
                                .recordKey(itemUrl)
                                .httpUrl(itemUrl)
                                .needParsed(false)
                                .resultLabelTag(article)
                                .resultLabelTag(interaction)
                                .releaseTime(releaseTimeToLong)
                                .build();

                        parsedLinks.add(itemRecord);

                        // First comment page for the article: POST to the shared endpoint,
                        // keyed by the article id ("nid") taken from .../detail?id=<nid>.
                        String nid = itemUrl.substring(itemUrl.lastIndexOf("=") + 1);
                        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                                .itemPageRequest(crawlerRequestRecord)
                                .recordKey(commentUrl + nid)
                                .httpUrl(commentUrl)
                                .resultLabelTag(comment)
                                .releaseTime(releaseTimeToLong)
                                .build();

                        HttpRequest httpRequest = commentRecord.getHttpRequest();
                        httpRequest.setMethod(HttpConstant.Method.POST);

                        Map<String, Object> params = new HashMap<>();
                        params.put("nid", nid);
                        params.put("lastReplyId", "0");
                        params.put("page", 1);
                        params.put("pagesize", 20);
                        httpRequest.setRequestBody(HttpRequestBody.form(params, "UTF-8"));

                        // Extras travel with the request so the comment branch can page onward.
                        Map<String, Object> extras = new HashMap<>(params);
                        extras.put("articleUrl", itemUrl);
                        httpRequest.setExtras(extras);

                        parsedLinks.add(commentRecord);
                    } catch (ParseException e) {
                        log.error("时间格式错误！时间是：" + releaseTime, e);
                    }
                }
            } else if (requestUrl.matches(commentUrl)) {
                Map<String, Object> oldExtras = httpPage.getRequest().getExtras();

                // Page onward only while the current page is full (pagesize == 20 entries).
                List<String> all = httpPage.getJson().jsonPath($_type + ".object").all();
                if (all.size() >= 20) {
                    JSONObject lastReply = JSONObject.parseObject(all.get(all.size() - 1));
                    String addTime = lastReply.getString("addTime");

                    try {
                        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                                .itemPageRequest(crawlerRequestRecord)
                                .recordKey(requestUrl)
                                .httpUrl(requestUrl)
                                .resultLabelTag(comment)
                                .notFilterRecord()
                                .releaseTime(DateUtils.parseDate(addTime, "yyyy-MM-dd HH:mm:ss").getTime())
                                .build();

                        HttpRequest httpRequest = commentRecord.getHttpRequest();
                        Map<String, Object> params = new HashMap<>();
                        params.put("nid", oldExtras.get("nid"));
                        params.put("lastReplyId", lastReply.getString("id"));
                        // BUGFIX: "page" is stored into extras as an Integer (see the list branch),
                        // so the previous cast to String threw ClassCastException at runtime;
                        // String.valueOf handles both Integer and String representations.
                        params.put("page", Integer.parseInt(String.valueOf(oldExtras.get("page"))) + 1);
                        params.put("pagesize", oldExtras.get("pagesize"));
                        httpRequest.setMethod(HttpConstant.Method.POST);
                        httpRequest.setRequestBody(HttpRequestBody.form(params, "UTF-8"));
                        httpRequest.setHeaders(crawlerRequestRecord.getHttpRequest().getHeaders());

                        Map<String, Object> extras = new HashMap<>(oldExtras);
                        extras.putAll(params);
                        httpRequest.setExtras(extras);

                        parsedLinks.add(commentRecord);
                    } catch (ParseException e) {
                        log.error("时间格式错误！时间是：" + addTime, e);
                    }
                }
            }
        } else {
            // Non-200 download: skip washing for this record and log the failure.
            crawlerRequestRecord.setNeedWashPage(false);
            log.info("页面(“ " + requestUrl + "”)下载错误！状态码：" + statusCode);
        }
        return parsedLinks;
    }

    /**
     * Cleans a downloaded page into structured crawler data, dispatching on the
     * record's result tags: article body, interaction counters, and comment JSON entries.
     *
     * @param crawlerRecord request record carrying the result-type tags
     * @param page          downloaded page (HTML for articles, JSON for the comment endpoint)
     * @return cleaned data items; empty when the page failed to download or was redirected
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        // TODO: redirect problem remains — how to obtain, through the response, the replies of
        // https://www.jfdaily.com/news/detail?id=317913
//        Header[] responseHeaders = page.getResponseHeaders();
        // A non-empty og:url / contentid meta tag is taken as evidence the page redirected
        // (see the else branch below). NOTE(review): this HTML probe also runs for comment
        // (JSON) responses — confirm that is intended.
        String url = page.getHtml().xpath("//meta[@property=\"og:url\"]/@content|//meta[@name=\"contentid\"]/@content").get();
        if(StringUtils.isEmpty(url)){
            int statusCode = page.getStatusCode();
            if (statusCode == 200) {
                if (crawlerResultTags.hasDataType(article)) {
                    crawlerDataList.add(washArticle(crawlerRecord, page));
                }

                if (crawlerResultTags.hasDataType(interaction)) {
                    crawlerDataList.add(washInteraction(crawlerRecord, page));
                }

                if (crawlerResultTags.hasDataType(comment)) {
                    Map<String, Object> oldExtras = page.getRequest().getExtras();

                    // Each element of `all` is one reply object serialized as a JSON string.
                    List<String> all = page.getJson().jsonPath($_type + ".object").all();
                    if (all.size() > 0) {
                        for (String data : all) {
                            // NOTE(review): washComment can return null on a bad timestamp,
                            // so null entries may end up in crawlerDataList.
                            crawlerDataList.add(washComment(crawlerRecord, page, data, oldExtras));
                        }
                    } else {
                        log.warn("文章(“" + oldExtras.get("articleUrl") + "”)评论为空！");
                    }
                }
            } else {
                log.info("该文章(“" + page.getRequest().getUrl() + "”)下载错误，错误码：" + statusCode);
            }
        }else{
            log.info("页面("+crawlerRecord.getHttpRequest().getUrl()+")发生了跳转！");
        }
        return crawlerDataList;
    }

    /**
     * Extracts article fields (title, source, author, abstract + body text) from an
     * article detail page.
     *
     * @param crawlerRequestRecord the record that scheduled this article page
     * @param httpPage             downloaded article HTML page
     * @return a CrawlerData item tagged as an article
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();

        String title = httpPage.getHtml().xpath("//div[@class=\"wz_contents\"]/text()").get();
        String source = "";
        String author = "";
        // The "来源：" (source) and "作者：" (author) labels are each followed by their value
        // in the next text node; the bound is size()-1 so the i+1 lookup can never go
        // out of bounds (the original looped to size() and could throw on a trailing label).
        List<String> all = httpPage.getHtml().xpath("//div[@class=\"fenxiang_zz\"]//text()").all();
        for (int i = 0; i < all.size() - 1; i++) {
            if (all.get(i).contains("来源：")) {
                source = all.get(i + 1);
            } else if (all.get(i).contains("作者：")) {
                author = all.get(i + 1);
            }
        }

        // Abstract first, then body paragraphs, HTML-unescaped and joined with spaces.
        List<String> articleTextList = httpPage.getHtml().xpath("//div[@id=\"newscontents\"]//text()").all();
        List<String> abstractTextList = httpPage.getHtml().xpath("//div[@class=\"wz_contents1\"]/div[1]//text()").all();
        StringBuilder contents = new StringBuilder();
        for (String abstractText : abstractTextList) {
            if (StringUtils.isNotEmpty(abstractText)) {
                contents.append(StringEscapeUtils.unescapeHtml4(abstractText)).append(" ");
            }
        }
        for (String articleText : articleTextList) {
            if (StringUtils.isNotEmpty(articleText)) {
                contents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
            }
        }

        // The article key is the numeric id at the end of .../news/detail?id=<key>.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("=") + 1);

        // Guard against a missing title node: unescapeHtml4(null) returns null,
        // and the original then NPE'd on .trim().
        String cleanTitle = StringUtils.defaultString(StringEscapeUtils.unescapeHtml4(title)).trim();

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .resultLabelTag(article)
                .url(itemUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_Content, contents.toString().trim())
                .addContentKV(Field_Title, cleanTitle)
                .addContentKV(Field_Source, source)
                .addContentKV(Field_Author, author)
                .build();
    }

    /**
     * Extracts interaction counters (like count and comment count) from an article
     * detail page, parented to the corresponding article data item.
     *
     * @param crawlerRequestRecord the record that scheduled this page
     * @param httpPage             downloaded article HTML page
     * @return a CrawlerData item tagged as interaction data
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("=") + 1);

        // Counters default to "0" only when the node text is empty; a non-empty text
        // without digits still yields null from getString — NOTE(review): confirm downstream
        // tolerates that (preserved original behavior).
        // Renamed the comment counter local: the original `String comment` shadowed the
        // statically imported `comment` enum constant.
        String likeCount = httpPage.getHtml().xpath("//span[@id=\"newspraisecounter\"]/text()").get();
        likeCount = StringUtils.isNotEmpty(likeCount) ? getString("\\d+", likeCount) : "0";
        String commentCount = httpPage.getHtml().xpath("//div[@class=\"dj\"]/span/text()").get();
        commentCount = StringUtils.isNotEmpty(commentCount) ? getString("\\d+", commentCount) : "0";

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                .resultLabelTag(interaction)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .url(itemUrl)
                .addContentKV(Field_I_Likes, likeCount)
                .addContentKV(Field_I_Comments, commentCount)
                .build();
    }

    /**
     * Converts one comment JSON object into a CrawlerData item.
     *
     * @param crawlerRequestRecord the record that scheduled the comment request
     * @param httpPage             downloaded comment-list JSON page
     * @param data                 one reply serialized as a JSON object string
     * @param oldExtras            request extras; "nid" carries the parent article id
     * @return the comment data item, or {@code null} when "addTime" cannot be parsed
     *         (callers add the result to a list unchecked, so null entries are possible)
     */
    public CrawlerData washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, String data, Map<String, Object> oldExtras) {
        JSONObject jsonObject = JSONObject.parseObject(data);
        String userName = jsonObject.getString("userName");
        String addTime = jsonObject.getString("addTime");
        String content = jsonObject.getString("content");
        String id = jsonObject.getString("id");

        String articleKey = (String) oldExtras.get("nid");
        CrawlerData crawlerData = null;
        try {
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), id))
                    .resultLabelTag(comment)
                    .releaseTime(DateUtils.parseDate(addTime, "yyyy-MM-dd HH:mm:ss").getTime())
                    .url(crawlerRequestRecord.getHttpRequest().getUrl())
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .addContentKV("author", userName)
                    .addContentKV("content", content)
                    .build();
        } catch (ParseException e) {
            // Keep the stack trace so malformed timestamps are diagnosable.
            log.error("时间格式错误！时间是：" + addTime, e);
        }
        return crawlerData;
    }


    /**
     * Post-execution hook; intentionally a no-op for this script.
     *
     * @param crawlerRecordContext context of the finished crawl record (unused)
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Returns the first substring of {@code input} that matches {@code regx}.
     * <p>
     * Replaces the original's `while` that always returned on the first iteration
     * (equivalent to an `if`) and drops an unused local list.
     *
     * @param regx  regular expression to search for
     * @param input text to search
     * @return the first match, or {@code null} when there is no match
     */
    private static String getString(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group() : null;
    }
}
