package com.chance.cc.crawler.development.scripts.doctor001;

import com.alibaba.fastjson.JSON;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.meta.core.bean.common.MetaResponse;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainKeys;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateFormatUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * Keyword-search crawler script for doctor001.com (保险快报网).
 *
 * Flow: fetch keyword list from the meta service, issue one search request per
 * keyword, follow "下一页" pagination links, then wash each matched article page
 * into an article record plus an interaction (views) record.
 *
 * @author baixin
 **/
public class Docotor001SearchCrawlerScript extends CrawlerCommonScript {

    private static final Logger logger = LoggerFactory.getLogger(Docotor001SearchCrawlerScript.class);

    /** Search entry point; keyword is appended as the {@code q} query parameter. */
    public static final String startUrl = "http://www.doctor001.com/plus/search.php";
    /** Matches search-result (list / pagination) pages. */
    public static final String nextPageUrlRegular = "http://www.doctor001.com/plus/search.php\\S*";
    /** Matches article detail pages, e.g. https://www.doctor001.com/<channel>/<id>.html */
    public static final String articleUrlRegular = "https://www.doctor001.com/\\S*/\\d+.html";

    public static final String domain = "doctor001";
    public static final String doctor001Site = "search";

    /** Matches runs of non-digit characters; used to split numbers out of relative-time strings.
     *  Compiled once — Pattern is immutable and thread-safe. */
    private static final Pattern NON_DIGIT_PATTERN = Pattern.compile("[^0-9]+");

    /**
     * Script domain definition.
     *
     * @return the crawler domain identifier for this script
     */
    @Override
    public String domain() {
        return domain;
    }

    /**
     * URL regular expressions that route records into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(startUrl);
        addUrlRegular(nextPageUrlRegular);
        addUrlRegular(articleUrlRegular);
    }

    /**
     * Input-data check: only records tagged with this script's site may enter.
     *
     * @param crawlerRequestRecord incoming record to validate
     * @return true when the record's "site" tag equals {@link #doctor001Site}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        return doctor001Site.equals(site);
    }

    /**
     * Builds one search request per keyword fetched from the meta keys endpoint.
     * Falls back to the default behavior when no support records are supplied.
     *
     * @param requestRecord        the seed request
     * @param supportSourceRecords pre-downloaded support records (meta keyword responses)
     * @return search requests, one per keyword
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> allItemRecords = new ArrayList<>();

        if (supportSourceRecords == null || supportSourceRecords.isEmpty()) {
            return super.prepareRequest(requestRecord, supportSourceRecords);
        }
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            if (supportSourceRecord.getHttpRequest().getUrl().endsWith("/v1/meta/" + domain() + "/keys?site=" + doctor001Site)) {
                try {
                    initAllCrawlerRecordByKeyword(requestRecord, supportSourceRecord, allItemRecords);
                } catch (Exception e) {
                    // pass the throwable so the stack trace is preserved (getMessage() alone loses it)
                    logger.error("init keyword record error", e);
                }
            }
        }
        return allItemRecords;
    }

    /**
     * Parses the meta-service keyword response and appends one search request
     * per keyword to {@code allItemRecords}. Errors are logged, not rethrown.
     *
     * @param requestRecord       the seed request whose headers/tags are copied
     * @param supportSourceRecord the downloaded meta response record
     * @param allItemRecords      output list the new requests are added to
     */
    private void initAllCrawlerRecordByKeyword(CrawlerRequestRecord requestRecord,
                                               CrawlerRequestRecord supportSourceRecord,
                                               List<CrawlerRecord> allItemRecords) {
        try {
            HttpPage httpPage = supportSourceRecord.getInternalDownloadPage();
            MetaResponse metaResponse = JSON.parseObject(httpPage.getRawText(), MetaResponse.class);
            // status == 0 means success; content is a list of JSON-encoded CrawlerDomainKeys
            if (metaResponse.getStatus() == 0 && metaResponse.getContent() != null) {
                List<String> contents = (List<String>) metaResponse.getContent();
                for (String content : contents) {
                    CrawlerDomainKeys crawlerDomainKeys = JSON.parseObject(content, CrawlerDomainKeys.class);
                    String keyword = crawlerDomainKeys.getKeyword();

                    CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(requestRecord)
                            .httpUrl(String.format("http://www.doctor001.com/plus/search.php?q=%s&searchtype=title", keyword))
                            .httpHeads(requestRecord.getHttpRequest().getHeaders())
                            .releaseTime(System.currentTimeMillis())
                            .copyBizTags()
                            .notFilterRecord()
                            .build();
                    crawlerRequestRecord.tagsCreator().bizTags().addKeywords(keyword);
                    allItemRecords.add(crawlerRequestRecord);
                }
            }
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }

    /**
     * Link-parsing entry point. Failed downloads are retried (up to a limit);
     * search-result pages are expanded into item links and a next-page link.
     *
     * @param crawlerRequestRecord the record being processed
     * @param httpPage             the downloaded page
     * @return follow-up requests (retries, item pages, next page)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        if (!httpPage.isDownloadSuccess()) {
            logger.error("{} page download error! will retry", domain());
            addCrawlerRecords(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        if (isUrlMatch(requestUrl, nextPageUrlRegular)) {
            nextPageParseLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }

        return parsedLinks;
    }

    /**
     * Extracts the pagination link and all article item links from a
     * search-result page.
     *
     * @param crawlerRequestRecord the current list-page record
     * @param httpPage             the downloaded list page
     * @param parsedLinks          output list for the generated requests
     */
    private void nextPageParseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Pagination: follow the "下一页" (next page) anchor if present.
        String nextPage = httpPage.getHtml().xpath("//a[text()='下一页']/@href").get();
        if (StringUtils.isNotBlank(nextPage)) {
            if (!nextPage.startsWith("http")) {
                // hrefs come HTML-escaped (e.g. &amp;) and relative — unescape and prefix host
                nextPage = "http://www.doctor001.com" + StringEscapeUtils.unescapeHtml(nextPage);
            }
            CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPage)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(turnRecord);
        }

        // List-page parsing: one item request per result row.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"resultlist\"]/ul/li").nodes();
        for (Selectable node : nodes) {
            try {
                String itemUrl = node.xpath(".//a/@href").get();
                if (StringUtils.isNotBlank(itemUrl)) {
                    if (!itemUrl.startsWith("http")) {
                        itemUrl = "https://www.doctor001.com" + itemUrl;
                    }
                    // span text nodes: [4] = view count, [5] = release time; strip stray CR entities
                    String releaseTime = node.xpath(".//span/text()[5]").get().trim().replace("&#xD;", "");
                    String views = node.xpath(".//span/text()[4]").get().trim().replace("&#xD;", "");
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(itemUrl)
                            .releaseTime(dateToTimestamp(releaseTime))
                            .needWashed(true)
                            .copyBizTags()
                            .build();
                    // carry list-page metadata to the item page for washPage to read back
                    itemRecord.tagsCreator().bizTags().addCustomKV("releaseTime", releaseTime);
                    itemRecord.tagsCreator().bizTags().addCustomKV(Field_I_Views, views);
                    parsedLinks.add(itemRecord);
                }
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

    /**
     * Wash entry point: only article detail pages are washed.
     *
     * @param crawlerRecord the record being washed
     * @param page          the downloaded page
     * @return extracted crawler data (article + interaction), possibly empty
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = page.getRequest().getUrl();
        if (isUrlMatch(requestUrl, articleUrlRegular)) {
            articleWash(crawlerRecord, page, crawlerDataList);
        }
        return crawlerDataList;
    }

    /**
     * Extracts title, body text, images, source and author from an article page
     * and emits an article record plus a linked interaction (views) record.
     *
     * @param crawlerRecord   the item record (carries releaseTime/views tags set in parseLinks)
     * @param page            the downloaded article page
     * @param crawlerDataList output list for the two generated records
     */
    private void articleWash(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerData> crawlerDataList) {
        try {
            String title = page.getHtml().xpath("//div[@class=\"title\"]/h2/text()").get();
            // unescape twice: the site double-encodes HTML entities (e.g. &amp;quot;)
            title = StringEscapeUtils.unescapeHtml(StringEscapeUtils.unescapeHtml(title.trim()));
            List<String> contents = page.getHtml().xpath("//td/p/text()").all();
            StringBuilder contentBuffer = new StringBuilder();
            for (String content : contents) {
                contentBuffer.append(StringEscapeUtils.unescapeHtml(StringEscapeUtils.unescapeHtml(content.trim())));
            }
            List<String> images = page.getHtml().xpath("//td/p/img/@src").all();
            StringBuilder sbImage = new StringBuilder();
            if (images != null) {
                for (String image : images) {
                    if (!image.startsWith("http")) {
                        // BUGFIX: the host must be PREPENDED to a relative src
                        // (original code appended it to the end of the path)
                        image = "https://www.doctor001.com" + (image.startsWith("/") ? image : "/" + image);
                    }
                    sbImage.append(image).append("\\x01");
                }
            }
            String releaseTime = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("releaseTime");
            String views = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(Field_I_Views);
            String source = page.getHtml().xpath("//div[@class=\"info\"]/text()[3]").get().trim();
            String author = page.getHtml().xpath("//div[@class=\"info\"]/text()[4]").get().trim();

            String url = page.getRequest().getUrl();
            // the numeric article id is the final path segment without the ".html" suffix
            String contentId = url.substring(url.lastIndexOf("/") + 1, url.lastIndexOf("."));
            CrawlerData crawlerArticleData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", domain(), doctor001Site, CrawlerEnum.CrawlerDataType.article.enumVal(), contentId))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .url(url)
                    .releaseTime(dateToTimestamp(releaseTime))
                    .addContentKV(Field_Content, contentBuffer.toString().trim())
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Source, source)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Images, sbImage.toString())
                    .build();
            crawlerArticleData.tagsCreator().bizTags().addDomain(domain());
            crawlerArticleData.tagsCreator().bizTags().addSite(doctor001Site);
            crawlerDataList.add(crawlerArticleData);

            CrawlerData crawlerArticleInteractionData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", domain(), doctor001Site, CrawlerEnum.CrawlerDataType.interaction.enumVal(), contentId))
                    .parentId(StringUtils.joinWith("-", domain(), doctor001Site, CrawlerEnum.CrawlerDataType.article.enumVal(), contentId))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                    .url(url)
                    .releaseTime(dateToTimestamp(releaseTime))
                    .addContentKV(Field_I_Views, views)
                    .build();
            // BUGFIX: tag the interaction record itself (original re-tagged the article record)
            crawlerArticleInteractionData.tagsCreator().bizTags().addDomain(domain());
            crawlerArticleInteractionData.tagsCreator().bizTags().addSite(doctor001Site);
            crawlerDataList.add(crawlerArticleInteractionData);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-queues a failed download as a retry record, tracking the attempt count
     * in the "download_retry_count" tag and giving up after 20 attempts.
     *
     * @param crawlerRequestRecords output list the retry record is added to
     * @param crawlerRecord         the record whose download failed
     */
    private void addCrawlerRecords(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {

        int count = 1;
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        if (crawlerBusinessTags.hasKVTag("download_retry_count")) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag("download_retry_count").getVal();
            if (count >= 20) {
                // BUGFIX: original format string had two placeholders but only one argument
                logger.error("{} download retry count exceeds the limit, request url {}",
                        domain(), crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        count++;
        crawlerBusinessTags.addCustomKV("download_retry_count", count);

        CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRecord)
                .httpRequest(crawlerRecord.getHttpRequest())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .notFilterRecord()
                .build();
        crawlerRequestRecords.add(crawlerRequestRecord);

    }

    /**
     * Converts the site's mixed date formats to epoch milliseconds. Handles
     * relative forms ("刚刚", "N秒前", "N分钟前", "N小时前", "N小时M分钟前"),
     * day prefixes ("今天"/"昨天"/"前天"), year-less dates ("MM月dd日", "MM-dd"),
     * and finally the absolute formats listed in {@link TimeForamtEnum}.
     *
     * @param dataStr raw date text scraped from the page
     * @return epoch milliseconds
     * @throws ParseException if the string matches none of the supported formats
     */
    public static long dateToTimestamp(String dataStr) throws ParseException {
        if (dataStr.equals("刚刚")) {
            return System.currentTimeMillis();
        } else if (Pattern.matches("\\d*秒前", dataStr)) {
            int number = Integer.parseInt(NON_DIGIT_PATTERN.split(dataStr)[0]);
            return (System.currentTimeMillis() - number * 1000L);
        } else if (Pattern.matches("\\d*分钟前", dataStr)) {
            int number = Integer.parseInt(NON_DIGIT_PATTERN.split(dataStr)[0]);
            return (System.currentTimeMillis() - number * 60 * 1000L);
        } else if (Pattern.matches("\\d*小时前", dataStr)) {
            int number = Integer.parseInt(NON_DIGIT_PATTERN.split(dataStr)[0]);
            return (System.currentTimeMillis() - number * 60 * 60 * 1000L);
        } else if (Pattern.matches("\\d*小时\\d*分钟前", dataStr)) {
            int hourNumber = Integer.parseInt(NON_DIGIT_PATTERN.split(dataStr)[0]);
            int minuteNumber = Integer.parseInt(NON_DIGIT_PATTERN.split(dataStr)[1]);
            long totalMillis = hourNumber * 60 * 60 * 1000L + minuteNumber * 60 * 1000L;
            return (System.currentTimeMillis() - totalMillis);
        } else if (dataStr.startsWith("今天")) {
            // "今天 HH:mm" — substitute today's date for the prefix, then fall through to parsing
            String currentTime = DateFormatUtils.format(System.currentTimeMillis(), TimeForamtEnum.format1.getFormat());
            dataStr = dataStr.replace("今天", currentTime);
        } else if (dataStr.startsWith("昨天")) {
            String yesterdayTime = DateFormatUtils.format(System.currentTimeMillis() - 60 * 60 * 24 * 1000L, TimeForamtEnum.format1.getFormat());
            dataStr = dataStr.replace("昨天", yesterdayTime);
        } else if (dataStr.startsWith("前天")) {
            String beforeYesterdayTime = DateFormatUtils.format(System.currentTimeMillis() - 2 * 60 * 60 * 24 * 1000L, TimeForamtEnum.format1.getFormat());
            dataStr = dataStr.replace("前天", beforeYesterdayTime);
        } else if (Pattern.matches("\\d{2}月\\d{2}[日]*", dataStr)) {
            // year-less date — assume the current year
            Calendar cal = Calendar.getInstance();
            dataStr = cal.get(Calendar.YEAR) + "年" + dataStr;
        } else if (Pattern.matches("\\d{2}-\\d{2}", dataStr)) {
            Calendar cal = Calendar.getInstance();
            dataStr = cal.get(Calendar.YEAR) + "-" + dataStr;
        }
        return DateUtils.parseDateStrictly(dataStr, TimeForamtEnum.allFormats()).getTime();
    }

    /**
     * Absolute date/time formats observed on the site, tried in order by
     * {@link DateUtils#parseDateStrictly}.
     */
    public enum TimeForamtEnum {
        format1("yyyy年MM月dd日"),
        format1_1("yyyy年MM月dd日 HH:mm:ss"),
        format1_2("yyyy年MM月dd日 HH:mm"),
        format1_3("yyyy年MM月dd日HH:mm"),
        format1_4("yyyy年MM月dd日 HH点mm分"),
        format1_5("yyyy年MM月dd日 HH点mm分ss秒"),
        format1_6("yyyy年MM月dd日HH点mm分"),
        format1_7("yyyy年MM月dd日HH点mm分ss秒"),


        format4("yyyy-MM-dd HH:mm:ss"),
        format5("yyyy-MM-dd HH:mm"),
        format6("yyyy-MM-dd"),

        format7("yyyy/MM/dd HH:mm:ss"),
        format7_1("MM/dd/yyyy HH:mm:ss"),
        format8("yyyy/MM/dd HH:mm"),
        format9("yyyy/MM/dd"),

        format10("yyyy.MM.dd HH:mm:ss"),
        format11("yyyy.MM.dd HH:mm"),
        format12("yyyy.MM.dd"),

        format13("EEE MMM d HH:mm:ss +0800 yyyy");

        private final String format;

        private TimeForamtEnum(String format) {
            this.format = format;
        }

        /**
         * @return every declared format pattern, in declaration order
         */
        public static String[] allFormats() {
            TimeForamtEnum[] timeForamtEnums = TimeForamtEnum.values();
            String[] formats = new String[timeForamtEnums.length];
            for (int count = 0; count < timeForamtEnums.length; count++) {
                formats[count] = timeForamtEnums[count].format;
            }
            return formats;
        }

        public String getFormat() {
            return format;
        }
    }
}
