package com.chance.cc.crawler.development.scripts.thepaper;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;

/**
 * Crawler script for thepaper.cn (澎湃新闻).
 *
 * Flow: the seed {@code load_index.jsp} request fans out to three channel listings;
 * each listing page yields (a) a next-page request, (b) per item an article-detail
 * request tagged {@code article}+{@code interaction}, and (c) a comment-page request
 * tagged {@code comment}. {@link #washPage} dispatches on those tags.
 */
public class ThepaperCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(ThepaperCrawlerScript.class);

    /** AJAX list endpoint that serves paged batches of news items. */
    private static final String url = "https://www.thepaper.cn/load_index.jsp";

    /** Site root, used to absolutize the relative article links found on list pages. */
    private static final String pageUrl = "https://www.thepaper.cn/";

    /**
     * Canonical timestamp layout. {@link #cleanTime} emits it and
     * {@code DateUtils.parseDate} consumes it, so both MUST agree on 24-hour "HH"
     * (the previous code formatted with 12-hour "hh", corrupting afternoon times).
     */
    private static final String DATETIME_PATTERN = "yyyy-MM-dd HH:mm:ss";

    /**
     * Matches timestamps such as "2020-01-02 10:30" (dash- or slash-separated) in
     * the article byline. Compiled once; {@code Pattern} is thread-safe.
     */
    private static final Pattern PUB_TIME_PATTERN =
            Pattern.compile("\\d*[-/]\\d*[-/]\\d* \\d*:\\d*");

    @Override
    public String domain() {
        return "thepaper";
    }

    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        // Every record produced by this script is accepted as-is.
        return true;
    }

    @Override
    public void initUrlRegulars() {
        addUrlRegular(url);
        addUrlRegular("https://www.thepaper.cn/load_index.jsp\\S*");
        addUrlRegular("https://www.thepaper.cn/newsDetail\\S*");
        addUrlRegular("https://www.thepaper.cn/newDetail_commt.jsp\\?contid=\\d*\\&_=\\d*");
    }

    /**
     * Extracts follow-up requests from a fetched page.
     *
     * @param crawlerRequestRecord the record that produced {@code httpPage}
     * @param httpPage             the downloaded page
     * @return new requests: channel seeds, next-page requests, article-detail and
     *         comment-page requests (possibly empty, never null)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String lastUrl = httpPage.getRequest().getUrl();

        if (lastUrl.equals(url)) {
            // Seed request: fan out to the three channel listings, each starting at page 1.
            List<String> channelQueries = Arrays.asList(
                    "?nodeids=25462,25488,97924,25489,25490,25423,25426,25424,25463,25491,25428,68750,27604,25464,25425,25429,25481,25430,25678,25427,25422,25487,25634,25635,25600,&channelID=25950&topCids=&pageidx=1",
                    "?nodeids=25434,25436,25433,25438,25435,25437,27234,25485,25432,37978,&channelID=25951&topCids=&pageidx=1",
                    "?nodeids=25487&topCids=&pageidx=1&isList=true");
            for (String query : channelQueries) {
                parsedLinks.add(buildTurnPageRequest(crawlerRequestRecord, url + query));
            }
        } else if (lastUrl.matches("https://www.thepaper.cn/load_index.jsp\\S*")) {
            if (lastUrl.matches("https://www.thepaper.cn/load_index.jsp\\?nodeids=\\d*,\\S*")) {
                // Multi-node listing: the page index is the value after the last '='.
                int currentPage = Integer.parseInt(lastUrl.substring(lastUrl.lastIndexOf("=") + 1));
                String nextPageUrl = lastUrl.substring(0, lastUrl.lastIndexOf("=") + 1) + (currentPage + 1);
                parsedLinks.add(buildTurnPageRequest(crawlerRequestRecord, nextPageUrl));
                parseNewsItems(crawlerRequestRecord, httpPage, parsedLinks);
            } else if (lastUrl.matches("https://www.thepaper.cn/load_index.jsp\\?nodeids=\\d*&\\S*")) {
                // Single-node listing: the page index sits between "pageidx=" and "&isList".
                String[] parts = lastUrl.split("pageidx=");
                int nextPageId = Integer.parseInt(parts[1].split("&isList")[0]) + 1;
                String nextPageUrl = parts[0] + "pageidx=" + nextPageId + "&isList=true";
                parsedLinks.add(buildTurnPageRequest(crawlerRequestRecord, nextPageUrl));
                parseNewsItems(crawlerRequestRecord, httpPage, parsedLinks);
            }
        }
        return parsedLinks;
    }

    /** Builds a pagination request for {@code nextPageUrl}, chained to {@code parent}. */
    private CrawlerRequestRecord buildTurnPageRequest(CrawlerRequestRecord parent, String nextPageUrl) {
        return CrawlerRequestRecord.builder()
                .turnPageRequest(parent)
                .recordKey(nextPageUrl)
                .httpUrl(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .build();
    }

    /**
     * Parses every news item ({@code div.news_li}) on a listing page and appends,
     * per item, an article-detail request (tagged article + interaction) and a
     * comment-page request (tagged comment) to {@code parsedLinks}.
     *
     * Previously this logic was duplicated verbatim in both pagination branches.
     */
    private void parseNewsItems(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                List<CrawlerRequestRecord> parsedLinks) {
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"news_li\"]").nodes();
        for (Selectable node : nodes) {
            String relativeUrl = node.xpath("./h2/a/@href").get();
            // Guard BEFORE prefixing pageUrl — the old check ran after concatenation
            // and therefore could never trigger.
            if (StringUtils.isBlank(relativeUrl)) {
                continue;
            }
            // The article key is the trailing id of the detail path, e.g. "newsDetail_forward_123456".
            String articleKey = relativeUrl.substring(relativeUrl.lastIndexOf("_") + 1);
            String itemUrl = pageUrl + relativeUrl;

            String pubTime = node.xpath("./div[@class=\"pdtt_trbs\"]/span/text()").get();
            if (StringUtils.isBlank(pubTime) || pubTime.contains("刚刚")) {
                // "刚刚" means "just now"; a missing timestamp is treated the same way
                // (the old code NPE'd on a null pubTime).
                pubTime = new SimpleDateFormat(DATETIME_PATTERN).format(new Date());
            } else {
                pubTime = cleanTime(pubTime);
            }
            try {
                long releaseTime = DateUtils.parseDate(pubTime, DATETIME_PATTERN).getTime();

                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .needParsed(false)
                        .releaseTime(releaseTime)
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build());

                String commentUrl = "https://www.thepaper.cn/newDetail_commt.jsp?contid=" + articleKey + "&_=" + releaseTime;
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(commentUrl)
                        .httpUrl(commentUrl)
                        .releaseTime(releaseTime)
                        .needParsed(false)
                        .resultLabelTag(comment)
                        .build());
            } catch (ParseException e) {
                // One bad timestamp must not abort the rest of the listing.
                log.error("unparsable publish time [{}] for {}", pubTime, itemUrl, e);
            }
        }
    }

    /**
     * Dispatches a downloaded detail/comment page to the wash routine(s) selected
     * by the result tags attached in {@link #parseLinks}.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        if (crawlerResultTags.hasDataType(article)) {
            CrawlerData articleData = washArticle(crawlerRecord, page);
            // washArticle returns null when no publish time could be parsed; the old
            // code added that null to the result list.
            if (articleData != null) {
                crawlerDataList.add(articleData);
            }
        }
        if (crawlerResultTags.hasDataType(interaction)) {
            crawlerDataList.add(washInteraction(crawlerRecord, page));
        }
        if (crawlerResultTags.hasDataType(comment)) {
            crawlerDataList.addAll(washComment(crawlerRecord, page));
        }
        return crawlerDataList;
    }

    /**
     * Washes a comment page into one CrawlerData per comment, parented on the
     * article record. When the page has no structured comment nodes, a single
     * placeholder record ("暂无评论") is emitted instead.
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        // The "contid=" value, i.e. the key of the article these comments belong to.
        String articleKey = requestUrl.substring(requestUrl.indexOf("=") + 1, requestUrl.lastIndexOf("&"));

        List<Selectable> nodes = httpPage.getHtml()
                .xpath("//div[@id=\"mainContent\"]/div[@class=\"comment_que\"]").nodes();
        if (!nodes.isEmpty()) {
            int sequence = 1; // running suffix so each comment gets a distinct dataId
            for (Selectable node : nodes) {
                String commentContent = node.xpath(".//div[@class=\"ansright_cont\"]/a/text()").get();
                if (commentContent == null) {
                    continue;
                }
                String commentAuthor = node.xpath(".//div[@class=\"aqwright\"]/h3/a/text()").get();
                String pubTime = node.xpath(".//div[@class=\"aqwright\"]/h3/span/text()").get();
                if (pubTime.contains("刚刚")) {
                    pubTime = new SimpleDateFormat(DATETIME_PATTERN).format(new Date());
                } else {
                    pubTime = cleanTime(pubTime);
                }
                try {
                    crawlerDataList.add(CrawlerData.builder()
                            .data(crawlerRequestRecord, httpPage)
                            .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                                    CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                            .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                                    comment.enumVal(), articleKey + "" + sequence))
                            .url(requestUrl)
                            .releaseTime(DateUtils.parseDate(pubTime, DATETIME_PATTERN).getTime())
                            .addContentKV("content", commentContent)
                            .addContentKV("author", commentAuthor)
                            .build());
                    sequence++;
                } catch (Exception e) {
                    log.error("failed to wash comment on {}", requestUrl, e);
                }
            }
            return crawlerDataList;
        }

        // Fallback: no structured nodes — emit a single placeholder record so
        // downstream consumers still see the page.
        String commentContent = httpPage.getHtml()
                .xpath("//div[@class=\"comment_que\"]/div/div/div/a/text()").get();
        if (StringUtils.isEmpty(commentContent)) {
            commentContent = "暂无评论";
        }
        crawlerDataList.add(CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                        CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                        comment.enumVal(), articleKey))
                .url(requestUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV("content", commentContent)
                .addContentKV("author", "")
                .build());
        return crawlerDataList;
    }

    /**
     * Washes an article detail page into one CrawlerData record.
     *
     * @return the article record, or {@code null} when no publish time can be
     *         located or parsed (the old code NPE'd on a missing timestamp)
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {
            List<String> texts = httpPage.getHtml().xpath("//div[@class=\"news_txt\"]/text()").all();
            String author = httpPage.getHtml().xpath("//div[@class=\"news_about\"]/p/text()").get();

            // Join the byline fragments and scan them for the LAST timestamp-looking token.
            StringBuilder about = new StringBuilder();
            for (String fragment : httpPage.getHtml().xpath("//div[@class=\"news_about\"]/p/text()").all()) {
                about.append(fragment).append(" ");
            }
            String pubTime = null;
            Matcher matcher = PUB_TIME_PATTERN.matcher(about.toString());
            while (matcher.find()) {
                pubTime = matcher.group(0);
            }
            // Note: the old 年/月/日 replacement was dead code — PUB_TIME_PATTERN can
            // only match digit/'-'/'/' dates, never CJK date markers.
            if (pubTime == null) {
                log.warn("no publish time found on {}", httpPage.getRequest().getUrl());
                return null;
            }

            StringBuilder content = new StringBuilder();
            for (String text : texts) {
                content.append(text).append(" ");
            }

            String itemUrl = httpPage.getRequest().getUrl();
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1).split("\\.")[0];

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                            CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd HH:mm").getTime())
                    .addContentKV("content", content.toString().trim())
                    .addContentKV("title", httpPage.getHtml().xpath("//h1[@class=\"news_title\"]").get())
                    .addContentKV("author", author)
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage(), e);
        }
        return null;
    }

    /**
     * Washes the like/comment counters of an article detail page into a single
     * interaction record parented on the article.
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1);

        // Like ("zan") counter text.
        String likes = httpPage.getHtml().xpath("//a[@class=\"zan\"]/text()").get().trim();

        // Comment count appears as "…（N）" inside the span; NOTE(review): the
        // length==2 short-circuit presumably matches a count-less label meaning
        // zero comments — confirm against a live page.
        String comments = httpPage.getHtml().xpath("//h2[@id=\"comm_span\"]/span").get();
        if (comments.length() == 2) {
            comments = "0";
        } else {
            comments = comments.substring(comments.lastIndexOf("（") + 1, comments.lastIndexOf("）"));
        }

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                        article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                        interaction.enumVal(), articleKey))
                .url(itemUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV("likes", likes)
                .addContentKV("comments", comments)
                .build();
    }

    /**
     * Normalizes the site's relative/absolute time strings ("5分钟前" = "5 minutes
     * ago", "昨天" = "yesterday", "2020-01-02", "2020-01-02 10:30", ...) into
     * "yyyy-MM-dd HH:mm:ss".
     *
     * Fixed: the formatter previously used 12-hour "hh", while every caller parses
     * the result with 24-hour "HH" — afternoon timestamps came out 12 hours wrong.
     *
     * @param time_need_clean raw time text scraped from the page
     * @return a "yyyy-MM-dd HH:mm:ss" string, or the input unchanged when no rule matches
     */
    public String cleanTime(String time_need_clean) {
        final long ONE_SECOND = 1000L;
        final long ONE_MINUTE = 60000L;
        final long ONE_HOUR = 3600000L;
        final long ONE_DAY = 86400000L;
        final long ONE_WEEK = 604800000L;

        final String ONE_SECOND_AGO = "秒前";
        final String ONE_MINUTE_AGO = "分钟前";
        final String ONE_HOUR_AGO = "小时前";
        final String ONE_DAY_AGO = "天前";
        final String ONE_WEEK_AGO = "周前";
        final String ONE_MONTH_AGO = "月前";
        final String ONE_YEAR_AGO = "年前";
        final String YESTERDAY = "昨天";
        final String THE_DAY_BEFORE_YESTERDAY = "前天";
        final String TODAY = "今天";

        long nowTime = new Date().getTime();
        Calendar calendar = Calendar.getInstance();
        // SimpleDateFormat is not thread-safe, so it stays a local (not a static field).
        SimpleDateFormat simpleDateFormat = new SimpleDateFormat(DATETIME_PATTERN);
        String releaseTime;

        // Branch order matters: "X天前" must be checked before the bare-length
        // fallbacks, and "前天" before "昨天"/"今天".
        if (time_need_clean.contains(ONE_SECOND_AGO)) {
            int timeBase = Integer.parseInt(time_need_clean.replace(ONE_SECOND_AGO, ""));
            releaseTime = simpleDateFormat.format(new Date(nowTime - timeBase * ONE_SECOND));
        } else if (time_need_clean.contains(ONE_MINUTE_AGO)) {
            int timeBase = Integer.parseInt(time_need_clean.replace(ONE_MINUTE_AGO, ""));
            releaseTime = simpleDateFormat.format(new Date(nowTime - timeBase * ONE_MINUTE));
        } else if (time_need_clean.contains(ONE_HOUR_AGO)) {
            int timeBase = Integer.parseInt(time_need_clean.replace(ONE_HOUR_AGO, ""));
            releaseTime = simpleDateFormat.format(new Date(nowTime - timeBase * ONE_HOUR));
        } else if (time_need_clean.contains(ONE_DAY_AGO)) {
            int timeBase = Integer.parseInt(time_need_clean.replace(ONE_DAY_AGO, ""));
            releaseTime = simpleDateFormat.format(new Date(nowTime - timeBase * ONE_DAY));
        } else if (time_need_clean.contains(THE_DAY_BEFORE_YESTERDAY)) {
            releaseTime = simpleDateFormat.format(new Date(nowTime - ONE_DAY * 2));
        } else if (time_need_clean.contains(YESTERDAY)) {
            releaseTime = simpleDateFormat.format(new Date(nowTime - ONE_DAY));
        } else if (time_need_clean.contains(TODAY)) {
            releaseTime = simpleDateFormat.format(new Date(nowTime));
        } else if (time_need_clean.contains(ONE_WEEK_AGO)) {
            int timeBase = Integer.parseInt(time_need_clean.replace(ONE_WEEK_AGO, ""));
            releaseTime = simpleDateFormat.format(new Date(nowTime - timeBase * ONE_WEEK));
        } else if (time_need_clean.contains(ONE_MONTH_AGO)) {
            int timeBase = Integer.parseInt(time_need_clean.replace(ONE_MONTH_AGO, ""));
            calendar.setTime(new Date(nowTime));
            calendar.add(Calendar.MONTH, -timeBase);
            releaseTime = simpleDateFormat.format(calendar.getTime());
        } else if (time_need_clean.contains(ONE_YEAR_AGO)) {
            int timeBase = Integer.parseInt(time_need_clean.replace(ONE_YEAR_AGO, ""));
            calendar.setTime(new Date(nowTime));
            calendar.add(Calendar.YEAR, -timeBase);
            releaseTime = simpleDateFormat.format(calendar.getTime());
        } else if (!time_need_clean.equalsIgnoreCase("") && time_need_clean.trim().getBytes().length <= 10) {
            // Date only, e.g. "2020-01-02" — pad to midnight.
            releaseTime = time_need_clean + " 00:00:00";
        } else if (time_need_clean.getBytes().length > 10 && time_need_clean.getBytes().length < 17) {
            // Date + hour:minute, e.g. "2020-01-02 10:30" — pad the seconds.
            releaseTime = time_need_clean + ":00";
        } else {
            releaseTime = time_need_clean;
        }

        return releaseTime;
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // No post-execution cleanup required for this site.
    }


}
