package com.chance.cc.crawler.development.scripts.medicinal;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * Created by ljl on 2021-06-28.
 * Crawler for the "industry news" (行业动态) section of medicinal.cn (医药网).
 */

public class MedicinalDongtaiCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(MedicinalDongtaiCrawlerScript.class);

    /** List-page URL prefix; next-page hrefs on list pages are relative to it. */
    public static final String listUrlPrefix = "http://www.medicinal.cn/html/dongtai/";

    // Release timestamps appear inside the byline text as "yyyy-MM-dd HH:mm".
    // Pattern is immutable and thread-safe, so compile it once instead of per page.
    private static final Pattern RELEASE_TIME_PATTERN =
            Pattern.compile("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}");

    private static final String RELEASE_TIME_FORMAT = "yyyy-MM-dd HH:mm";

    /**
     * Script domain identifier used to route records to this script.
     *
     * @return the fixed domain key {@code "medicinalDongtai"}
     */
    public String domain() {
        return "medicinalDongtai";
    }

    /**
     * Registers the URL patterns handled by this script:
     * list (paging) pages and article detail pages.
     */
    public void initUrlRegulars() {
        addUrlRegular("http://www.medicinal.cn/html/dongtai/\\S*"); // paging
        addUrlRegular("http://www\\.medicinal\\.cn/xinwen/\\d+/\\d+\\.html"); // detail
    }

    /**
     * Pre-execution input check; every record is accepted for this site.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return always {@code true}
     */
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }

    /**
     * Extracts follow-up requests from a list page: one "next page" request
     * (when present) plus one detail-page request per article link.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded list page
     * @return the parsed follow-up requests (possibly empty, never null)
     */
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<CrawlerRequestRecord>();

        // Next-page href is relative, so prepend the list prefix. On the last
        // page the xpath yields null; skip the request entirely instead of
        // emitting a bogus ".../null" URL (bug in the original).
        String nextPageHref = httpPage.getHtml().xpath("//div[@class=\"plist\"]/li/a[text()='下一页']/@href").get();
        if (StringUtils.isNotBlank(nextPageHref)) {
            String nextPageUrl = listUrlPrefix + nextPageHref;
            CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(turnPageRequest);
        }

        // Detail-page links.
        List<Selectable> nodes = httpPage.getHtml().xpath("//a[@class=\"td_24\"]").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath("./@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }

            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(itemUrl)
                    .httpUrl(itemUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .resultLabelTag(article)
                    .copyBizTags()
                    .build();
            parsedLinks.add(itemRecord);
        }

        return parsedLinks;
    }

    /**
     * Washes a detail page into result records, dispatching on the data-type
     * tags carried by the request. Null wash results are dropped so the
     * returned list never contains null elements.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {

        List<CrawlerData> crawlerDataList = new ArrayList<>();
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(article)) {
            CrawlerData crawlerData = washArticle(crawlerRecord, page);
            if (crawlerData != null) {
                crawlerDataList.add(crawlerData);
            }
        }

        if (crawlerRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            // Null-check for consistency with the article branch (the original
            // could add a null element here).
            CrawlerData interactionData = washInteraction(crawlerRecord, page);
            if (interactionData != null) {
                crawlerDataList.add(interactionData);
            }
        }

        return crawlerDataList;
    }

    /**
     * Pulls the "yyyy-MM-dd HH:mm" timestamp out of the byline text that
     * follows the author span.
     *
     * @param httpPage the downloaded detail page
     * @return the matched timestamp string, or {@code null} when the byline
     *         is missing or contains no recognizable timestamp
     */
    private String extractReleaseTime(HttpPage httpPage) {
        String byline = httpPage.getHtml().xpath("//span[@class=\"link-hui\"]/following-sibling::text()").get();
        if (byline == null) {
            // Original code NPE'd here on pages without a byline.
            return null;
        }
        Matcher matcher = RELEASE_TIME_PATTERN.matcher(byline);
        return matcher.find() ? matcher.group(0) : null;
    }

    /**
     * Washes an article detail page into an article {@link CrawlerData}.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded detail page
     * @return the washed article record, or {@code null} when the page cannot
     *         be parsed (missing or malformed release time)
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        try {
            // Body text: space-joined paragraphs, HTML entities unescaped.
            List<String> texts = httpPage.getHtml().xpath("//td[@class=\"td_24\"]/p/text()").all();
            StringBuilder contents = new StringBuilder();
            for (String text : texts) {
                contents.append(text).append(" ");
            }
            String content = unescapeHtml2J(contents.toString().trim());

            // Inline image URLs joined with the literal "\x01" downstream delimiter.
            List<String> allImages = httpPage.getHtml().xpath("//td[@class=\"td_24\"]/p//img/@src").all();
            StringBuilder sbImage = new StringBuilder();
            for (String allImage : allImages) {
                sbImage.append(allImage).append("\\x01");
            }

            String releaseTimeStr = extractReleaseTime(httpPage);
            if (releaseTimeStr == null) {
                log.warn("no release time found on article page {}", itemUrl);
                return null;
            }

            // Detail URLs end with "<articleKey>.html"; the key scopes the dataId.
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];

            String title = unescapeHtml2J(httpPage.getHtml().xpath("//td[@class=\"font_17\"]/text()").get());

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTimeStr, RELEASE_TIME_FORMAT).getTime())
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, httpPage.getHtml().xpath("//span[@class=\"link-hui\"]/text()").get())
                    .addContentKV(Field_Images, sbImage.toString())
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .build();
        } catch (ParseException e) {
            log.error("failed to wash article page {}: {}", itemUrl, e.getMessage(), e);
        }
        return null;
    }

    /**
     * Washes a detail page into an interaction {@link CrawlerData} linked to
     * its parent article. The site exposes no view/like counters, so both
     * fields are stored as empty strings.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded detail page
     * @return the washed interaction record, or {@code null} when the release
     *         time cannot be extracted or parsed
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();

        String releaseTimeStr = extractReleaseTime(httpPage);
        if (releaseTimeStr == null) {
            log.warn("no release time found on interaction page {}", itemUrl);
            return null;
        }

        // No counters on this site; keep the fields empty rather than absent.
        String views = "";
        String likes = "";

        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];
        try {
            // BUG FIX: the original built this record and then returned null
            // unconditionally, silently discarding every interaction result.
            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .url(itemUrl)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), articleKey))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .releaseTime(DateUtils.parseDate(releaseTimeStr, RELEASE_TIME_FORMAT).getTime())
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Views, views)
                    .resultLabelTag(interaction)
                    .build();
        } catch (ParseException e) {
            // BUG FIX: log through SLF4J instead of e.printStackTrace().
            log.error("failed to wash interaction page {}: {}", itemUrl, e.getMessage(), e);
        }
        return null;
    }

    /** Post-execution hook; nothing to do for this script. */
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
    }

    /**
     * Repeatedly HTML-unescapes a string (at most 6 passes) to undo
     * double-encoded entities such as {@code &amp;amp;}.
     *
     * @param str the possibly entity-encoded string; may be {@code null}
     * @return the unescaped string, or {@code null} when {@code str} is null
     */
    public static String unescapeHtml2J(String str) {
        if (str == null) {
            // Null-safety fix: xpath .get() results may be null.
            return null;
        }
        int times = 0;
        while (str.contains("&") && str.contains(";")) {
            str = StringEscapeUtils.unescapeHtml(str);
            times++;
            if (times > 5) {
                // Bail out on strings that legitimately contain "&" and ";".
                break;
            }
        }
        return str;
    }

}
