package com.chance.cc.crawler.development.scripts.zju;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import groovy.json.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;

/**
 * Crawler script for Zhejiang University news (news.zju.edu.cn).
 *
 * <p>Handles three URL shapes (see {@link #initUrlRegulars()}):
 * list pages (article links + pagination), article pages (content plus a link
 * to the visit-count endpoint), and the visit-count endpoint itself.
 */
public class ZjuNewsCrawlerScript extends CrawlerCommonScript {

    // Logger is static final: one per class, not one per script instance.
    private static final Logger log = LoggerFactory.getLogger(ZjuNewsCrawlerScript.class);

    /** Site root used to absolutize the relative hrefs found in pages. */
    private static final String BASE_URL = "http://www.news.zju.edu.cn";

    // URL patterns shared by initUrlRegulars() and parseLinks() so the two can never drift apart.
    private static final String LIST_URL_REGEX = "http://www.news.zju.edu.cn/\\S*/list\\d+.htm";
    private static final String ARTICLE_URL_REGEX = "http://www.news.zju.edu.cn/\\d*/\\d*/\\S*/page.htm";
    private static final String VISIT_COUNT_URL_REGEX =
            "http://www.news.zju.edu.cn/_visitcountdisplay\\?siteId=27&type=3&articleId=\\d*";

    // Date formats observed on the site: list pages use slashes, article pages use dashes.
    private static final String LIST_DATE_PATTERN = "yyyy/MM/dd";
    private static final String ARTICLE_DATE_PATTERN = "yyyy-MM-dd";

    @Override
    public String domain() {
        return "zju";
    }

    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        // All records for this domain are accepted as-is.
        return true;
    }

    @Override
    public void initUrlRegulars() {
        addUrlRegular(LIST_URL_REGEX);
        addUrlRegular(ARTICLE_URL_REGEX);
        addUrlRegular(VISIT_COUNT_URL_REGEX);
    }

    /**
     * Extracts follow-up requests from a fetched page.
     *
     * @param crawlerRequestRecord the record that produced {@code httpPage}
     * @param httpPage             the downloaded page
     * @return next-page / article / visit-count requests found on the page (possibly empty)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String lastUrl = httpPage.getRequest().getUrl();
        if (lastUrl.matches(LIST_URL_REGEX)) {
            parseListPage(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (lastUrl.matches(ARTICLE_URL_REGEX)) {
            parseArticlePage(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /** Collects the pagination link and the per-article links from a list page. */
    private void parseListPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                               List<CrawlerRequestRecord> parsedLinks) {
        // Pagination: follow the "next" link when present. Original code concatenated
        // a literal "null" into the URL when the last page had no next link.
        String nextPageHref = httpPage.getHtml().xpath("//a[@class=\"next\"]/@href").get();
        if (StringUtils.isNotBlank(nextPageHref)) {
            String nextPageUrl = BASE_URL + nextPageHref;
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .build());
        }

        // One <li> per article entry.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@id=\"wp_news_w32\"]/ul/li").nodes();
        for (Selectable node : nodes) {
            String itemHref = node.xpath("./span[@class=\"cols_title\"]/a/@href").get();
            if (StringUtils.isBlank(itemHref)) {
                continue;
            }
            String itemUrl = BASE_URL + itemHref;

            String pubTime = node.xpath(".//span[@class=\"cols_meta\"]").get();
            try {
                long releaseTime = DateUtils.parseDate(pubTime, LIST_DATE_PATTERN).getTime();
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTime)
                        .resultLabelTag(article)
                        .build());
            } catch (ParseException e) {
                // A bad/missing date on one entry must not abort the rest of the list.
                log.error("Unparseable publish time '{}' for {}", pubTime, itemUrl, e);
            }
        }
    }

    /** Derives the visit-count (interaction) request from an article page. */
    private void parseArticlePage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                  List<CrawlerRequestRecord> parsedLinks) {
        // e.g. "发布时间：2020-09-30". Check for blank BEFORE substring — the original
        // called substring on a possibly-null xpath result and only then tested it.
        String releaseTime = httpPage.getHtml().xpath("//span[@class=\"arti-update\"]/text()").get();
        if (StringUtils.isBlank(releaseTime)) {
            return;
        }
        releaseTime = releaseTime.substring(releaseTime.indexOf("：") + 1);
        if (StringUtils.isBlank(releaseTime)) {
            return;
        }

        // The counter endpoint URL is carried in the @url attribute with HTML-escaped
        // separators; un-escape "&amp;" -> "&" (replaces the fragile index arithmetic
        // that assumed exactly two separators).
        String interactionHref = httpPage.getHtml().xpath("//span[@class=\"WP_VisitCount\"]/@url").get();
        if (interactionHref == null) {
            return;
        }
        String interactionUrl = BASE_URL + interactionHref.replace("&amp;", "&");
        try {
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(interactionUrl)
                    .httpUrl(interactionUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, ARTICLE_DATE_PATTERN).getTime())
                    .needParsed(false)   // endpoint returns a bare counter, nothing to parse for links
                    .resultLabelTag(interaction)
                    .build());
        } catch (ParseException e) {
            log.error("Unparseable release time '{}' for {}", releaseTime, interactionUrl, e);
        }
    }

    /**
     * Routes a fetched page to the washer(s) matching its result tags.
     *
     * @return washed data items; may contain nulls if an individual wash failed
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();
        if (crawlerResultTags.hasDataType(article)) {
            crawlerDataList.add(washArticle(crawlerRecord, page));
        }
        if (crawlerResultTags.hasDataType(interaction)) {
            crawlerDataList.add(washInteraction(crawlerRecord, page));
        }
        return crawlerDataList;
    }

    /**
     * Extracts title, body text, author and release time from an article page.
     *
     * @return the washed article, or {@code null} when the page lacks a parseable release time
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {
            // Paragraph text; some articles wrap each paragraph's text in an extra <span>.
            List<String> texts = httpPage.getHtml().xpath("//div[@class=\"wp_articlecontent\"]/p/text()").all();
            if (texts.isEmpty()) {
                texts = httpPage.getHtml().xpath("//div[@class=\"wp_articlecontent\"]/p/span/text()").all();
            }

            // e.g. "发布时间：2020-09-30" -> "2020-09-30". Guard before substring (original NPE'd on null).
            String releaseTime = httpPage.getHtml().xpath("//span[@class=\"arti-update\"]/text()").get();
            if (StringUtils.isBlank(releaseTime)) {
                log.warn("No release time found on {}", httpPage.getRequest().getUrl());
                return null;
            }
            releaseTime = releaseTime.substring(releaseTime.indexOf("：") + 1);

            // e.g. "作者：柯溢能 朱原之" -> "柯溢能 朱原之"; fall back to the arti-info span.
            String author = httpPage.getHtml().xpath("//span[@class=\"arti-author\"]/text()").get();
            if (author == null) {
                author = httpPage.getHtml().xpath("//span[@class=\"arti-info\"]/text()").get();
            }
            if (author != null) {
                author = author.substring(author.indexOf("：") + 1);
            }

            StringBuilder contents = new StringBuilder();
            for (String text : texts) {
                contents.append(text).append(" ");
            }

            // Article key is the second-to-last path segment of the URL.
            String itemUrl = httpPage.getRequest().getUrl();
            String[] segments = itemUrl.split("/");
            String articleKey = segments[segments.length - 2];

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, ARTICLE_DATE_PATTERN).getTime())
                    .addContentKV("content", contents.toString().trim())
                    .addContentKV("title", httpPage.getHtml().xpath("//h1[@class=\"arti-title rol-title\"]/text()").get())
                    .addContentKV("author", author)
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage(), e);
        }
        return null;
    }

    /**
     * Washes the visit-count endpoint response into an interaction data item
     * linked to its parent article via the shared articleKey.
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String requestUrl = httpPage.getRequest().getUrl();
        // articleId is the value of the last query parameter (…&articleId=NNN).
        String articleKey = requestUrl.substring(requestUrl.lastIndexOf("=") + 1);

        // Response body looks like "1234&…": the count, optionally followed by '&'-separated extras.
        // Original condition used '||' (always true, NPE on null) and an unguarded
        // substring that threw when no '&' was present.
        String interactionValue = httpPage.getHtml().xpath("//body/text()").get();
        if (interactionValue == null) {
            interactionValue = "";
        } else {
            int sep = interactionValue.indexOf("&");
            if (sep >= 0) {
                interactionValue = interactionValue.substring(0, sep);
            }
        }

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                .url(requestUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV("views", interactionValue.trim())
                .build();
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // No post-processing required for this domain.
    }
}
