package com.chance.cc.crawler.development.scripts.cn21;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.TagsUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;

/**
 * Crawler script for 21cn domestic news (news.21cn.com/domestic).
 *
 * <p>List pages are followed via their "next" anchor and mined for article
 * links; article detail pages are washed into {@link CrawlerData} records
 * carrying title, author, content and release time.
 *
 * created by CC on 2020/11/5
 * mail 279020185@qq.com
 */
public class CN21CrawlerScript extends CrawlerCommonScript {

    // SLF4J convention: one static final logger per class.
    private static final Logger log = LoggerFactory.getLogger(CN21CrawlerScript.class);

    /** Base URL of the domestic list pages; the "next" anchor href is relative to it. */
    public static final String listUrlPrefix = "http://news.21cn.com/domestic/";

    /** Publish-time pattern on list pages (year is prepended when the page omits it). */
    private static final String LIST_TIME_FORMAT = "yyyy年MM月dd日 HH:mm";

    /** Publish-time pattern on article detail pages. */
    private static final String DETAIL_TIME_FORMAT = "yyyy-MM-dd HH:mm:ss";

    /**
     * Script domain identifier.
     *
     * @return the fixed domain key {@code "21cn"}
     */
    public String domain() {
        return "21cn";
    }

    /**
     * Registers the URL patterns this script accepts:
     * list pages (paging) and article detail pages.
     */
    public void initUrlRegulars() {
        addUrlRegular("http://news.21cn.com/domestic/\\S*"); //翻页 (paging)
        addUrlRegular("http://news\\.21cn\\.com/\\S*/a/\\d{4}/\\d{4}/\\d{2}/\\d*\\.shtml"); //详情 (detail)
    }

    /**
     * Pre-execution input check; this script accepts every request.
     *
     * @param crawlerRequestRecord incoming request record
     * @return always {@code true}
     */
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }

    /**
     * Extracts follow-up links from a list page: the "next page" request plus one
     * detail-page request per article item whose URL and publish time are present.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded list page
     * @return next-page and detail-page requests found on the page (possibly empty)
     */
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        // Next-page href is relative; resolve against the list base URL. Guard against
        // a missing "next" anchor (last page) so we don't emit ".../null" as a URL.
        String nextHref = httpPage.getHtml().xpath("//a[@class=\"next\"]/@href").get();
        if (StringUtils.isNotBlank(nextHref)) {
            String nextPageUrl = listUrlPrefix + nextHref;
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .build());
        }

        // Article items on the list page; skip entries missing a URL or a publish time.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"art-list\"]").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./div/div/h3/a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }
            String pubTime = node.xpath("./div//span[@class=\"time\"]").get();
            if (StringUtils.isBlank(pubTime)) {
                continue;
            }
            // Current-year items omit the year ("MM月dd日 HH:mm"); prepend it so the
            // single pattern below parses both forms.
            if (!pubTime.contains("年")) {
                pubTime = Calendar.getInstance().get(Calendar.YEAR) + "年" + pubTime;
            }

            try {
                long releaseTime = DateUtils.parseDate(pubTime, LIST_TIME_FORMAT).getTime();
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTime)
                        .resultLabelTag(article)
                        .build());
            } catch (ParseException e) {
                // Skip the malformed item rather than failing the whole page.
                log.error("failed to parse publish time [{}] for {}", pubTime, itemUrl, e);
            }
        }

        return parsedLinks;
    }

    /**
     * Dispatches washing of a downloaded page to the handler matching each
     * tagged data type. Handlers may return {@code null} on failure (or, for
     * {@link #washInteraction}, because it is not implemented); such results
     * are filtered out instead of polluting the list with nulls.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();

        if (crawlerRecord.tagsCreator().resultTags().hasDataType(article)) {
            CrawlerData articleData = washArticle(crawlerRecord, page);
            if (articleData != null) {
                crawlerDataList.add(articleData);
            }
        }

        if (crawlerRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            CrawlerData interactionData = washInteraction(crawlerRecord, page);
            if (interactionData != null) {
                crawlerDataList.add(interactionData);
            }
        }

        return crawlerDataList;
    }

    /**
     * Washes an article detail page into a {@link CrawlerData}: joins the body
     * paragraphs into one content string and extracts title, author and release time.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded detail page
     * @return the washed article, or {@code null} when the release time cannot be parsed
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {
            List<String> texts = httpPage.getHtml().xpath("//div[@class=\"clearfix\"]/p/text()").all();
            String releaseTimeStr = httpPage.getHtml().xpath("//span[@class=\"pubTime\"]").get();

            // Paragraphs are joined with single spaces into one content string.
            String content = String.join(" ", texts).trim();

            // Article key: numeric file name of the .shtml URL, e.g. ".../a/2020/1105/12/123456.shtml" -> "123456".
            String itemUrl = httpPage.getRequest().getUrl();
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf('/') + 1).split("\\.")[0];

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                            CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTimeStr, DETAIL_TIME_FORMAT).getTime())
                    .addContentKV("content", content)
                    .addContentKV("title", httpPage.getHtml().xpath("//h1[@class=\"title\"]").get())
                    .addContentKV("author", httpPage.getHtml().xpath("//span[@class=\"infoAuthor\"]").get())
                    .build();
        } catch (ParseException e) {
            log.error("failed to parse article release time for {}: {}",
                    httpPage.getRequest().getUrl(), e.getMessage(), e);
        }
        return null;
    }

    /**
     * Washes an interaction (e.g. comments) page.
     * Not implemented yet; always returns {@code null} (filtered out by {@link #washPage}).
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        return null;
    }

    /**
     * Post-execution hook; no cleanup is needed for this script.
     */
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }
}
