package com.chance.cc.crawler.development.scripts.sina.module;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Topic_Type;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/2/4 11:47
 * @Description 新浪 医药板块
 **/
/**
 * Crawler script for the Sina medical section (med.sina.com).
 *
 * <p>Handles three page types, each matched by a URL regex:
 * the section entrance page, the paginated article-list pages, and the
 * article detail pages. List pages yield detail-page requests plus a
 * next-page request; detail pages are washed into {@link CrawlerData}
 * article records.
 */
public class SinaMedCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(SinaMedCrawlerScript.class);
    private static final String DOMAIN = "sina";
    private static final String SITE = "med";
    private static final String REQUEST_AGAIN_TAG = "sina_request_retry";
    /** Maximum number of re-download attempts before a URL is abandoned. */
    private static final int MAX_RETRY_COUNT = 5;

    // URL patterns (whole-string regexes) for the three page types handled here.
    private static final String MED_ENTRANCE_URL = "http[s]*://med.sina.com/";
    private static final String MED_HTML_URL = "http[s]*://med.sina.com/article_list_-1_1_\\d+_\\d+.html";
    private static final String ITEM_URL = "http[s]*://med.sina.com/article_detail_\\d+_\\d+_\\d+.html";

    /**
     * Domain this script belongs to.
     *
     * @return the constant domain key {@code "sina"}
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers the URL regexes that route requests into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(MED_ENTRANCE_URL);
        addUrlRegular(MED_HTML_URL);
        addUrlRegular(ITEM_URL);
    }

    /**
     * Input gate: only records whose "site" tag is blank or equals {@code "med"}
     * are processed by this script.
     *
     * @param crawlerRequestRecord incoming request record
     * @return true if this script should handle the record
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        // Blank-check first: the original called site.equals(...) before the
        // blank check and threw NPE whenever the "site" tag was absent.
        return StringUtils.isBlank(site) || SITE.equals(site);
    }

    /**
     * Parses follow-up links from a downloaded page and dispatches by page type.
     *
     * <p>Non-200/404 downloads are retried (up to {@link #MAX_RETRY_COUNT}),
     * 404s are dropped; both suppress the wash step for this record.
     *
     * @param crawlerRequestRecord the request that produced this page
     * @param httpPage             the downloaded page
     * @return follow-up request records (possibly empty, never null)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        if (!httpPage.isDownloadSuccess() || (httpPage.getStatusCode() != 200 && httpPage.getStatusCode() != 404)) {
            log.error("{} status code : {}", requestUrl, httpPage.getStatusCode());
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        if (httpPage.getStatusCode() == 404) {
            // Permanently missing page: do not retry, do not wash.
            log.error("{} status code : {}", requestUrl, httpPage.getStatusCode());
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        if (requestUrl.matches(MED_ENTRANCE_URL)) {
            medEntranceUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        if (requestUrl.matches(MED_HTML_URL)) {
            medHtmlUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        if (requestUrl.matches(ITEM_URL)) {
            itemUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /** Entrance page: derive the async article-list URL and enqueue it. */
    private void medEntranceUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                      List<CrawlerRequestRecord> parsedLinks) {
        CrawlerRequestRecord htmlRecord = getEntraceHtmlRecord(crawlerRequestRecord, httpPage);
        if (htmlRecord != null) {
            parsedLinks.add(htmlRecord);
        }
    }

    /**
     * List page: enqueue the next list page (pagination) and one detail-page
     * request per article entry found on this page.
     */
    private void medHtmlUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                  List<CrawlerRequestRecord> parsedLinks) {
        // Pagination: the URL embeds "...-1_1_<page>_<sign>.html"; bump <page> by one.
        String requestUrl = httpPage.getRequest().getUrl();
        String[] split = requestUrl.split("-1_1_");
        String[] pageAndSign = split[1].split("_");
        String nextUrl = split[0] + "-1_1_" + (Integer.parseInt(pageAndSign[0]) + 1) + "_" + pageAndSign[1];
        CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parsedLinks.add(turnRecord);

        // Detail-page extraction, one request per list entry.
        List<Selectable> nodes = httpPage.getHtml().xpath("//li/div[@class=\"indextext-right\"]").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath(".//a[@class=\"indextext-title\"]/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                // Was `return`: one malformed entry aborted the whole list. Skip it instead.
                continue;
            }

            String releaseTime = node.xpath(".//span[@class=\"indextext-time\"]/text()").get();
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }
            // Drop any trailing ".xxx" fragment before parsing.
            releaseTime = releaseTime.trim().split("\\.")[0];

            try {
                long releaseTimeToLong = washTimeToLong(releaseTime);
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                log.error(e.getMessage());
            }
        }
    }

    /**
     * Detail page: this site yields no interaction/comment data, so strip
     * those result-type tags before the wash step.
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                               List<CrawlerRequestRecord> parsedLinks) {
        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.interaction)) {
            resultTags.getCategoryTag().removeLabelTag("interaction");
        }

        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.comment)) {
            resultTags.getCategoryTag().removeLabelTag("comment");
        }
    }

    /**
     * Washes a downloaded page into result data. Only the article data type
     * is produced by this script.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();

        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.article)) {
            crawlerDataList.add(washArticle(crawlerRecord, page));
        }

        return crawlerDataList;
    }

    /**
     * Extracts title, author, source, release time, body text and keywords
     * from an article detail page.
     *
     * @return the washed article, or null if the release time failed to parse
     */
    private CrawlerData washArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // The trailing numeric id of "article_detail_x_y_<id>.html" keys the record.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));

        String title = httpPage.getHtml().xpath("//h1/text()").get();
        String author = "";
        String source = "";
        List<Selectable> nodes = httpPage.getHtml().xpath("//span[contains(@class,'wz-zuthorname')]").nodes();
        for (Selectable node : nodes) {
            List<String> all = node.xpath(".//text()").all();
            for (String data : all) {
                if (data.contains("来源：")) {
                    // Source sits in one of three places; try most-specific first.
                    source = node.xpath("./em/a/text()").get();
                    if (StringUtils.isBlank(source)) {
                        source = node.xpath("./em/text()").get();
                        if (StringUtils.isBlank(source)) {
                            // Was split("：")[1] — threw AIOOBE when nothing followed the colon.
                            source = afterFullWidthColon(node.xpath("./text()").get());
                        }
                    }
                }
            }

            if (all.contains("作者：")) {
                // Was get().split("：")[1] — NPE on missing text, AIOOBE on bare label.
                author = afterFullWidthColon(node.xpath("./text()").get());
            }
        }

        String releaseTime = httpPage.getHtml().xpath("//span[@class=\"wz-fbtime\"]/text()").get();
        // NOTE(review): the page appears to render the timestamp in English
        // RFC-1123-like form ("EEE MMM dd HH:mm:ss z yyyy") — confirm against live pages.
        DateFormat df = new SimpleDateFormat("EEE MMM dd HH:mm:ss z yyyy", Locale.ENGLISH);

        List<String> all = httpPage.getHtml().xpath("//div[@class=\"textbox\"]//text()").all();
        StringBuilder content = new StringBuilder();
        for (String data : all) {
            // Skip blanks and the boilerplate disclaimer paragraph.
            if (StringUtils.isBlank(data) || data.contains("*声明：")) {
                continue;
            }
            content.append(data).append(" ");
        }
        List<String> keywordsList = httpPage.getHtml().xpath("//div[@class=\"biaoqian\"]/a/text()").all();
        if (keywordsList != null && !keywordsList.isEmpty()) {
            requestRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, keywordsList);
        }

        CrawlerData article = null;
        try {
            // Fall back to the release time captured on the list page when the
            // detail page carries no timestamp.
            Long releaseTimeMillis = StringUtils.isNotBlank(releaseTime)
                    ? df.parse(releaseTime.replace("　", "")).getTime()
                    : requestRecord.getReleaseTime();
            article = CrawlerData.builder()
                    .data(requestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .releaseTime(releaseTimeMillis)
                    .url(itemUrl)
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Content, content.toString().trim())
                    .addContentKV(Field_Source, source)
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage());
        }
        return article;
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // No post-processing required for this site.
    }

    /**
     * Entrance page: reads the async-load "sign" attribute and builds the
     * first article-list URL from it.
     *
     * @return the list-page request, or null when no sign is present
     */
    private CrawlerRequestRecord getEntraceHtmlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String sign = httpPage.getHtml().xpath("//div[@class=\"list show\"]/a[@class=\"clickmore\"]/@sign").get();
        if (StringUtils.isBlank(sign)) {
            log.error("sign is null!");
            return null;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        String turnUrl = requestUrl + "article_list_" + sign + ".html";
        return CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(turnUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .build();
    }

    /**
     * Re-enqueues a failed download, tracking the attempt count in a biz tag
     * and giving up after {@link #MAX_RETRY_COUNT} tries. The retry record is
     * rebuilt as a turn-page or item-page request to match the original.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= MAX_RETRY_COUNT) {
                log.error("sina download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    // Append the attempt count so the retry gets a distinct record key.
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // Carry over processing flags, headers and extras so the retry behaves
        // exactly like the original request.
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Returns the text after the first full-width colon ("："), or "" when the
     * input is blank or no text follows the colon. Mirrors the original
     * split("：")[1] behavior but never throws.
     */
    private static String afterFullWidthColon(String text) {
        if (StringUtils.isBlank(text)) {
            return "";
        }
        String[] parts = text.split("：");
        return parts.length > 1 ? parts[1] : "";
    }

    /**
     * Returns the first match of {@code regx} in {@code input}, or null.
     */
    private static String washContent(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group(0) : null;
    }

    /**
     * Parses a list-page timestamp in either "yyyy-MM-dd HH:mm:ss" or
     * "发表于 yyyy/MM/dd HH:mm" form into epoch millis; blank input yields 0.
     *
     * @throws ParseException if the string matches neither pattern
     */
    private static long washTimeToLong(String time) throws ParseException {
        if (StringUtils.isBlank(time)) {
            return 0;
        }

        return DateUtils.parseDate(time.trim(), "yyyy-MM-dd HH:mm:ss", "发表于 yyyy/MM/dd HH:mm").getTime();
    }

    /**
     * Checks whether a release time falls inside the record's configured
     * date-range filter; records without a date filter always pass.
     *
     * @param releaseTimeToLong release time in epoch millis (0 = unknown, never in range)
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord, Long releaseTimeToLong) {
        boolean isRange = false;
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        if (filter == CrawlerEnum.CrawlerRecordFilter.keyOrDateRange || filter == CrawlerEnum.CrawlerRecordFilter.dateRange) {
            List<FilterInfo> filterInfos = crawlerRequestRecord.getFilterInfos();
            Long startTime = null;
            Long endTime = null;
            for (FilterInfo filterInfo : filterInfos) {
                if (filterInfo.getFilter() == CrawlerEnum.CrawlerRecordFilter.dateRange) {
                    long[] dateAllowRange = filterInfo.getDateAllowRange();
                    int hourFromNow = filterInfo.getHourFromNow();
                    if (dateAllowRange != null) {
                        // Explicit [start, end] window takes precedence.
                        startTime = dateAllowRange[0];
                        endTime = dateAllowRange[1];
                    } else if (hourFromNow != 0) {
                        // Rolling window ending now.
                        endTime = System.currentTimeMillis();
                        startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
                    }
                }
            }
            if (startTime != null && releaseTimeToLong != 0 && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime) {
                isRange = true;
            }
        } else {
            isRange = true;
        }
        return isRange;
    }

    /**
     * Builds the next-page URL by incrementing the {@code page} query parameter
     * while re-encoding the {@code keyword} parameter and passing all other
     * parameters through unchanged.
     */
    private String getNextUrl(String requestUrl, String keyword, String page) {
        String[] split = requestUrl.split("\\?");
        if (split.length < 2) {
            // No query string to advance; original indexed split[1] and threw.
            return requestUrl;
        }
        StringBuilder nextUrl = new StringBuilder(split[0]).append("?");
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair nameValuePair : parse) {
            String name = nameValuePair.getName();
            String value = nameValuePair.getValue();
            if (StringUtils.isNotBlank(page) && page.equals(name)) {
                nextUrl.append(name).append("=").append(Integer.parseInt(value) + 1).append("&");
            } else if (StringUtils.isNotBlank(keyword) && keyword.equals(name)) {
                try {
                    nextUrl.append(name).append("=").append(URLEncoder.encode(value, "UTF-8")).append("&");
                } catch (UnsupportedEncodingException e) {
                    log.error(e.getMessage());
                }
            } else {
                nextUrl.append(name).append("=").append(value).append("&");
            }
        }
        // Drop the trailing "&".
        return nextUrl.substring(0, nextUrl.length() - 1);
    }

    public static void main(String[] args) {
        String s = "https://a.sina.cn/t/author/19656553/";

        // Ad-hoc check: extract the trailing path segment (author id with slash).
        String substring = s.substring(s.substring(0, s.length() - 1).lastIndexOf("/") + 1);
        System.out.println(substring);
    }
}
