package com.chance.cc.crawler.development.scripts.voc;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;

/**
 * Crawler script for news.voc.com.cn (华声在线).
 *
 * <p>Flow: the home page yields the channel list pages (only channels 2200 and
 * 1922 are followed — presumably the desired news sections; confirm against the
 * site), each list page yields its successor page plus the article links on it,
 * and {@link #washArticle} extracts title/content/author/publish-time from an
 * article page.
 */
public class VocCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(VocCrawlerScript.class);

    /** Site root; also used to absolutize relative article links on list pages. */
    private static final String url = "http://news.voc.com.cn/";

    /** Highest list-page index that is still followed when turning pages. */
    private static final int MAX_LIST_PAGE = 20;

    // Pattern is immutable and thread-safe — compile once instead of per call.
    private static final Pattern SOURCE_PATTERN   = Pattern.compile("\\[来源:.*?\\]");
    private static final Pattern AUTHOR_PATTERN   = Pattern.compile("\\[作者:.*?\\]");
    private static final Pattern PUB_TIME_PATTERN = Pattern.compile("\\d*[-/]\\d*[-/]\\d* \\d*:\\d*:\\d*");

    @Override
    public String domain() {
        return "voc";
    }

    /** All requests are accepted; no per-record filtering is applied. */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }

    /** URL patterns this script handles: home page, list pages, article pages. */
    @Override
    public void initUrlRegulars() {
        addUrlRegular("http://news\\.voc\\.com\\.cn/");
        addUrlRegular("http://news\\.voc\\.com\\.cn/class/\\d*_\\d*\\.html");
        addUrlRegular("http://\\S*\\.voc\\.com\\.cn/article/\\d*/\\d*\\.html");
    }

    /**
     * Extracts follow-up requests from a fetched page.
     *
     * @param crawlerRequestRecord the request that produced {@code httpPage}
     * @param httpPage             the fetched page
     * @return channel list-page requests (from the home page) or next-page plus
     *         article requests (from a list page); empty for any other URL
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String lastUrl = httpPage.getRequest().getUrl();

        if (lastUrl.matches("http://news\\.voc\\.com\\.cn/")) {
            parseHomePage(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (lastUrl.matches("http://news\\.voc\\.com\\.cn/class/\\d*_\\d*\\.html")) {
            parseListPage(crawlerRequestRecord, httpPage, lastUrl, parsedLinks);
        }

        return parsedLinks;
    }

    /** Home page: turn each nav link into its "_1.html" list page and keep channels 2200/1922. */
    private void parseHomePage(CrawlerRequestRecord parent, HttpPage httpPage, List<CrawlerRequestRecord> out) {
        List<String> listUrl = httpPage.getHtml().xpath("//div[@class=\"nav1000\"]/ul/li/a/@href").all();
        for (String itemUrl : listUrl) {
            if (StringUtils.isBlank(itemUrl) || itemUrl.lastIndexOf(".") < 0) {
                continue; // defensive: nav entry without an extension cannot be rewritten
            }
            // Rewrite ".../NNNN.html" into the first list page ".../NNNN_1.html".
            itemUrl = itemUrl.substring(0, itemUrl.lastIndexOf(".")) + "_1.html";
            // Dots escaped: the original pattern used bare '.' which matches any char.
            if (itemUrl.matches("http://news\\.voc\\.com\\.cn/class/(2200|1922)_1\\.html")) {
                out.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(parent)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(System.currentTimeMillis())
                        .build());
            }
        }
    }

    /** List page: enqueue the next page (up to {@link #MAX_LIST_PAGE}) and every dated article. */
    private void parseListPage(CrawlerRequestRecord parent, HttpPage httpPage, String lastUrl,
                               List<CrawlerRequestRecord> out) {
        // Current page index is the number between the last '_' and ".html".
        int currentPage = Integer.parseInt(lastUrl.substring(lastUrl.lastIndexOf("_") + 1).split("\\.")[0]);

        // FIX: the original built and enqueued a request with a null URL once the
        // page limit was exceeded; now the next-page request is skipped entirely.
        if (currentPage <= MAX_LIST_PAGE) {
            String nextPageUrl = lastUrl.substring(0, lastUrl.lastIndexOf("_") + 1) + (currentPage + 1) + ".html";
            out.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(parent)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .build());
        }

        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"newsmod\"]").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./h3/a/@href").get();

            // FIX: blank/null check must precede any use of itemUrl (the original
            // called matches() first and NPE'd when the xpath yielded null).
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }
            if (!itemUrl.matches("http://\\S*\\.voc\\.com\\.cn/article/\\d*/\\d*\\.html")) {
                // Relative link: absolutize against the site root. Assumes the href
                // starts with a path like "/article/..." — TODO confirm on the site.
                itemUrl = url + itemUrl.substring(itemUrl.indexOf("/") + 1);
            }

            String pubTime = node.xpath(".//div[@class=\"time_a\"]/p/text()").get();
            if (StringUtils.isBlank(pubTime)) {
                continue; // FIX: original NPE'd on pubTime.contains("年") when null
            }
            // List pages omit the year for current-year articles; prepend it.
            if (!pubTime.contains("年")) {
                pubTime = Calendar.getInstance().get(Calendar.YEAR) + "年" + pubTime;
            }

            try {
                long releaseTime = DateUtils.parseDate(pubTime, "yyyy年MM月dd日 HH:mm").getTime();
                out.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(parent)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTime)
                        .resultLabelTag(article)
                        .needParsed(false)
                        .build());
            } catch (ParseException e) {
                // FIX: log via SLF4J instead of printStackTrace; one bad timestamp
                // should not abort the rest of the page.
                log.error("unparseable publish time '{}' on list page {}", pubTime, lastUrl, e);
            }
        }
    }

    /**
     * Dispatches washing by the record's result tags: article pages go through
     * {@link #washArticle}, interaction data through {@link #washInteraction}.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        if (crawlerResultTags.hasDataType(article)) {
            crawlerDataList.add(washArticle(crawlerRecord, page));
        }
        if (crawlerResultTags.hasDataType(interaction)) {
            crawlerDataList.add(washInteraction(crawlerRecord, page));
        }

        return crawlerDataList;
    }

    /**
     * Extracts an article (title, body text, author/source, publish time) from a
     * detail page. Two layouts are supported: metadata inside the "font_time"
     * div, or — as a fallback — the "pubtime_baidu"/"source_baidu" SEO spans.
     *
     * @return the washed article, or {@code null} when no publish time could be
     *         found or parsed
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {
            String releaseTimeStr = httpPage.getHtml().xpath("//div[@class=\"font_time\"]/div/text()").get();

            String author = releaseTimeStr == null ? "" : extractAuthor(releaseTimeStr);

            // Take the LAST "yyyy-MM-dd HH:mm:ss"-shaped token in the metadata line,
            // matching the original loop-until-exhausted behaviour.
            String pubTime = null;
            if (releaseTimeStr != null) {
                Matcher mtPubTime = PUB_TIME_PATTERN.matcher(releaseTimeStr);
                while (mtPubTime.find()) {
                    pubTime = mtPubTime.group(0);
                }
            }

            // Fallback layout: metadata lives in Baidu SEO spans.
            if (pubTime == null) {
                pubTime = httpPage.getHtml().xpath("//span[@id=\"pubtime_baidu\"]/text()").get();
                author = httpPage.getHtml().xpath("//span[@id=\"source_baidu\"]/a/text()").get();
            }
            // FIX: isBlank also covers the null author the fallback xpath can return
            // (the original's author.equals("") NPE'd there).
            if (StringUtils.isBlank(author)) {
                List<String> authorStr = httpPage.getHtml().xpath("//div[@class=\"font_time\"]/div/text()").all();
                author = extractAuthor(String.join(" ", authorStr));
            }
            // FIX: the original passed a possibly-null pubTime to DateUtils.parseDate,
            // which throws an uncaught IllegalArgumentException.
            if (StringUtils.isBlank(pubTime)) {
                log.warn("no publish time found on {}", httpPage.getRequest().getUrl());
                return null;
            }

            List<String> texts = httpPage.getHtml()
                    .xpath("//div[@id=\"content\"]/p/span/text() | //div[@id=\"content\"]/p/text()").all();
            StringBuilder contents = new StringBuilder();
            for (String text : texts) {
                contents.append(text).append(" ");
            }

            String itemUrl = httpPage.getRequest().getUrl();
            // Article key = file name without extension, e.g. ".../123/456.html" -> "456".
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                            CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd HH:mm:ss").getTime())
                    .addContentKV("content", contents.toString().trim())
                    .addContentKV("title", httpPage.getHtml().xpath("//div[@class=\"main_l\"]/h1/text()").get())
                    .addContentKV("author", author)
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage(), e);
        }
        return null;
    }

    /**
     * Pulls the author from metadata text shaped like "[来源:xxx] ... [作者:yyy]".
     * "[作者:...]" (author) wins over "[来源:...]" (source) when both appear, and
     * only when a source tag is present at all — preserving the original
     * nested-matcher behaviour.
     *
     * @return the extracted author/source, or "" when no tag matched
     */
    private String extractAuthor(String text) {
        String author = "";
        Matcher source = SOURCE_PATTERN.matcher(text);
        if (source.find()) {
            author = source.group(0).split(":")[1].replace("]", "");
            Matcher explicit = AUTHOR_PATTERN.matcher(text);
            while (explicit.find()) {
                author = explicit.group(0).split(":")[1].replace("]", "");
            }
        }
        return author;
    }

    /** Interaction washing is not implemented for this site. */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        return null;
    }

    /** No post-processing is needed after a record executes. */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
    }
}
