package com.chance.cc.crawler.development.scripts.cankao;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * Crawler script for 参考消息 (cankaoxiaoxi.com).
 *
 * <p>Flow: the site entry page yields channel links; each channel page yields article
 * links plus a release date; a multi-page article additionally yields pagination links
 * that are downloaded internally and merged into the article body in
 * {@link #washArticle}.
 *
 * @author Zhao.Hhuan
 * @since 2020/11/16
 */
public class CankaoCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(CankaoCrawlerScript.class);

    /** Extras key used to hand paginated body text from {@link #afterInternalDownload} to {@link #washArticle}. */
    private static final String EXTRA_PAGE_TEXT = "pageText";

    public static final String listUrlPrefix = "http://china.cankaoxiaoxi.com/";
    // NOTE(review): appears unused — the actual pagination cap below is hardcoded to 4; confirm intent.
    public static final int pageLinkCount = 6;

    /**
     * @return the domain identifier under which this script is registered
     */
    @Override
    public String domain() {
        return "cankao";
    }

    /**
     * URL patterns that route a request into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular("http://\\S*.cankaoxiaoxi.com/");           // entry / channel pages
        addUrlRegular("http://\\S*.cankaoxiaoxi.com/\\S*.shtml"); // article detail pages
    }

    /**
     * Input-data check; requests failing it never enter the script.
     * All requests are currently accepted.
     *
     * @param crawlerRequestRecord the incoming request
     * @return always {@code true}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }


    /**
     * Dispatches link extraction by page kind: entry page → channel links,
     * channel page → article links, article page → pagination links.
     *
     * @param crawlerRequestRecord the request that produced {@code httpPage}
     * @param httpPage             the downloaded page
     * @return the follow-up requests extracted from the page (possibly empty, never {@code null})
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        String requestUrl = httpPage.getRequest().getUrl();
        if (requestUrl.matches("http://www.cankaoxiaoxi.com/")) {
            parseChannelLinks(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches("http://(china|world|mil|column|finance|culture|science|sports|ihl).cankaoxiaoxi.com/")) {
            parseArticleLinks(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches("http://www.cankaoxiaoxi.com/[a-zA-Z]*/\\d{8}/\\d*.shtml|http://[a-zA-Z]*.cankaoxiaoxi.com/\\d{4}/\\d{4}/\\d*.shtml")) {
            parsePaginationLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /** Extracts the channel links off the site entry page's main navigation bar. */
    private void parseChannelLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                   List<CrawlerRequestRecord> parsedLinks) {
        List<String> modelLinks = httpPage.getHtml().xpath("//ul[@class=\"mainNav ov\"]/li/a/@href").all();
        for (String modelLink : modelLinks) {
            // The 锐参考 and 参考智库 channels are deliberately not crawled (not in the whitelist).
            if (modelLink.matches("http://(china|world|mil|column|finance|culture|science|sports).cankaoxiaoxi.com/")) {
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(modelLink)
                        .recordKey(modelLink)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .build());
            }
        }
    }

    /** Extracts article links (with release dates) from a channel list page. */
    private void parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                   List<CrawlerRequestRecord> parsedLinks) {
        List<Selectable> nodes = httpPage.getHtml()
                .xpath("//div[contains(@class,'info')]|//div[@class=\"oneNewsInfo\"]").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./p/a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }

            // Release date comes from the list item's date tag; fall back to the
            // yyyyMMdd segment embedded in the article URL.
            String releaseTime = node.xpath(".//span[@class=\"date_tag\"]/text()|.//span[@class=\"dateTag\"]/text()").get();
            if (StringUtils.isEmpty(releaseTime)) {
                releaseTime = getString("\\d{8}", itemUrl);
            }
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }

            try {
                String datePattern = releaseTime.matches("\\d{8}") ? "yyyyMMdd" : "yyyy-MM-dd";
                long releaseTimeToLong = DateUtils.parseDate(releaseTime, datePattern).getTime();

                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .resultLabelTag(article)
                        .build());
            } catch (ParseException e) {
                // Skip items with unparseable dates instead of aborting the whole page.
                log.error("Failed to parse release time '{}' for {}", releaseTime, itemUrl, e);
            }
        }
    }

    /** Extracts the pager links of a multi-page article; they are fetched as internal downloads. */
    private void parsePaginationLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                      List<CrawlerRequestRecord> parsedLinks) {
        List<String> pageLinks = httpPage.getHtml().xpath("//ul[@class=\"pageUl\"]/li/a/@href").all();
        // De-duplicate: the pager repeats the same hrefs on the page.
        List<String> distinctLinks = pageLinks.stream().distinct().collect(Collectors.toList());
        for (String pageLink : distinctLinks) {
            // Cap follow-up page requests at 4 (original comment said "at most five pages" in total).
            if (pageLink.matches("http://www.cankaoxiaoxi.com/[a-zA-Z]*/\\d{8}/\\d*_\\d*.shtml|http://[a-zA-Z]*.cankaoxiaoxi.com/\\d{4}/\\d{4}/\\d*_\\d*.shtml")
                    && parsedLinks.size() < 4) {
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(pageLink)
                        .httpUrl(pageLink)
                        .needParsed(false)
                        .releaseTime(crawlerRequestRecord.getReleaseTime())
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                        .build());
            }
        }
    }

    /**
     * Collects the body text of an article's internally downloaded follow-up pages
     * and stashes it in the request extras for {@link #washArticle} to append.
     *
     * @param crawlerRecord           the article request whose extras receive the page text
     * @param internalDownloadRecords the downloaded pagination requests
     * @param links                   unused here
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        List<String> pageText = new ArrayList<>();
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            HttpPage internalDownloadPage = internalDownloadRecord.getInternalDownloadPage();

            List<String> all = internalDownloadPage.getHtml().xpath("//div[@class=\"articleText\"]//text()").all();
            StringBuilder pageBuilder = new StringBuilder();
            for (String text : all) {
                pageBuilder.append(StringEscapeUtils.unescapeHtml4(text)).append(" ");
            }
            pageText.add(pageBuilder.toString());
        }
        Map<String, Object> extras = new HashMap<>();
        extras.put(EXTRA_PAGE_TEXT, pageText);
        crawlerRecord.getHttpRequest().setExtras(extras);
    }

    /**
     * Washes a downloaded page into result data. Only article pages are handled.
     *
     * @return the washed results; empty when nothing could be extracted (never {@code null})
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();
        if (crawlerResultTags.hasDataType(article)) {
            // washArticle returns null on parse failure — don't add a null element.
            CrawlerData articleData = washArticle(crawlerRecord, page);
            if (articleData != null) {
                crawlerDataList.add(articleData);
            }
        }
        // todo
        return crawlerDataList;
    }

    /**
     * Assembles one article from a detail page: abstract + body text (HTML-unescaped),
     * plus any follow-up page text stashed in the extras by {@link #afterInternalDownload}.
     *
     * @param crawlerRequestRecord the article request
     * @param httpPage             the downloaded detail page
     * @return the assembled article, or {@code null} when the release time is missing or unparseable
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {
            String articleAbs = StringUtils.defaultString(
                    httpPage.getHtml().xpath("//div[@class=\"articleAbs\"]/span/text()").get());

            StringBuilder contents = new StringBuilder(articleAbs);
            for (String articleText : httpPage.getHtml().xpath("//div[@class=\"articleText\"]//text()").all()) {
                contents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
            }

            // Append text collected from the article's extra pages, if any.
            Map<String, Object> extras = httpPage.getRequest().getExtras();
            if (extras != null) {
                @SuppressWarnings("unchecked") // written exclusively by afterInternalDownload above
                List<String> pageText = (List<String>) extras.get(EXTRA_PAGE_TEXT);
                if (pageText != null) {
                    for (String page : pageText) {
                        contents.append(page);
                    }
                }
            }

            String releaseTimeStr = httpPage.getHtml().xpath("//span[@id=\"pubtime_baidu\"]/text()").get();
            String itemUrl = httpPage.getRequest().getUrl();
            if (StringUtils.isBlank(releaseTimeStr)) {
                // DateUtils.parseDate(null, ...) would throw an uncaught IllegalArgumentException; bail out early.
                log.warn("No release time found on {}", itemUrl);
                return null;
            }

            // e.g. ".../2020/1117/2424979.shtml" -> "2424979"
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

            // Source line is of the form "来源：xxx"; keep only the part after the full-width colon.
            String author = httpPage.getHtml().xpath("//span[@id=\"source_baidu\"]").get();
            if (StringUtils.isNotEmpty(author) && author.contains("：")) {
                author = author.substring(author.lastIndexOf("：") + 1);
            }

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                            CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTimeStr, "yyyy-MM-dd HH:mm:ss").getTime())
                    .addContentKV(Field_Content, contents.toString().trim())
                    .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(
                            httpPage.getHtml().xpath("//h1[@class=\"articleHead\"]").get()))
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            log.error("Failed to wash article {}", httpPage.getRequest().getUrl(), e);
        }
        return null;
    }


    /** Post-execution hook; intentionally a no-op for this script. */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Returns the first substring of {@code input} matching {@code regx},
     * or {@code null} when there is no match.
     *
     * @param regx  the regular expression to search for
     * @param input the string to search in
     * @return the first match, or {@code null}
     */
    private static String getString(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group(0) : null;
    }

}
