package com.chance.cc.crawler.development.scripts.ocn;

import com.alibaba.fastjson.JSON;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Topic_Type;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/6/17 14:20
 * @Description Module (板块) crawler script for ocn.com.cn (中投网): discovers
 *              module pages from the site entrance, follows pagination, and
 *              washes article detail pages into Kafka/MySQL pipeline results.
 **/
public class OcnModuleCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(OcnModuleCrawlerScript.class);
    private static final String DOMAIN = "ocn";
    private static final String SITE_START = "module";
    private static final String REQUEST_AGAIN_TAG = "ocn_request_retry";
    private static final String PREFIX = "http://www.ocn.com.cn";
    // Tag on the initial record carrying the serialized domain-result JSON (seed data).
    private static final String DOMAIN_RESULT_JSON_RECORD_TAG = "domain_result_json";
    private static final String KAFKA = "kafka"; // pipeline tag: route result data to Kafka
    private static final String MYSQL = "mysql"; // pipeline tag: route result data to MySQL

    // URL patterns for the three page types this script handles:
    // entrance page -> module list pages -> article detail pages.
    private static final String ENTRANCE_URL = "http[s]*://www.ocn.com.cn/[a-z]+/";
    private static final String MODULE_URL = "http://www.ocn.com.cn/[a-z]+/[a-z]+/\\d*";
    private static final String ITEM_URL = "http://www.ocn.com.cn/[a-z\\/]+/\\d{6}/[a-z0-9]*.shtml";


    /**
     * Domain handled by this script.
     *
     * @return the crawler domain identifier ("ocn")
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers the URL regular expressions that route requests into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(ENTRANCE_URL);
        addUrlRegular(MODULE_URL);
        addUrlRegular(ITEM_URL);
    }

    /**
     * Input gate: only records whose "site" business tag starts with "module"
     * are processed by this script.
     *
     * @param crawlerRequestRecord incoming request record
     * @return true when the record belongs to this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        // Null-safe: a record without a "site" tag is simply rejected instead of throwing NPE.
        return site != null && site.startsWith(SITE_START);
    }

    /**
     * Prepares the initial request list; expands a seed record that carries a
     * domain-result JSON into an item-page request (see {@link #getSycItemUrlRecord}).
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> requestRecords = new ArrayList<>();
        getSycItemUrlRecord(requestRecord, requestRecords);
        return requestRecords;
    }

    /**
     * Parses follow-up links from a downloaded page, dispatching by URL pattern
     * (entrance / module list / article detail). Failed downloads (non-200) are
     * retried unless the status is 404.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return newly discovered request records
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() != 200) {
            log.error("{} status code : {}", requestUrl, httpPage.getStatusCode());
            // 404 is permanent; anything else is worth a bounded retry.
            if (httpPage.getStatusCode() != 404) {
                requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            }
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        if (requestUrl.matches(ENTRANCE_URL)) {
            entranceUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        if (requestUrl.matches(MODULE_URL)) {
            moduleUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        if (requestUrl.matches(ITEM_URL)) {
            itemUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Handles a module list page: emits a turn-page request for the "next page"
     * link and item-page requests for every article link in the list.
     */
    private void moduleUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Pagination: follow the "下一页" (next page) anchor when present.
        String nextUrl = httpPage.getHtml().xpath("//a[text()='下一页']/@href").get();
        if (StringUtils.isNotBlank(nextUrl)) {
            nextUrl = PREFIX + nextUrl;
            CrawlerRequestRecord nextRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            parsedLinks.add(nextRecord);
        }

        // Article detail links from the info list.
        List<String> urls = httpPage.getHtml().xpath("//ul[@class=\"infolist\"]/li/div[@class=\"title\"]/a/@href").all();
        for (String url : urls) {
            if (StringUtils.isBlank(url)) {
                continue;
            }
            url = PREFIX + url;
            CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(url)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    // Washing is decided later in itemUrlRecord once the release time is known.
                    .needWashed(false)
                    .copyResultTags()
                    .build();
            parsedLinks.add(requestRecord);
        }
    }

    /**
     * Handles the site entrance page: discovers module sub-pages ("more" links)
     * and article links embedded on the front page, tagging each request with
     * its navigation path and module name.
     */
    private void entranceUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        // Module blocks: each title node carries the module name and a relative "more" link.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"iframe3s\"]//div[@class=\"title\"]").nodes();
        for (Selectable node : nodes) {
            String moduleName = node.xpath("./strong").get();
            String moduleUrl = node.xpath("./span[@class=\"more\"]/a/@href").get();
            if (StringUtils.isBlank(moduleUrl)) {
                continue;
            }
            // Module links are relative to the entrance URL, not to the site root.
            moduleUrl = requestUrl + moduleUrl;
            CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(moduleUrl)
                    .releaseTime(System.currentTimeMillis())
                    .httpHeads(httpPage.getRequest().getHeaders())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            requestRecord.getHttpRequest().setExtras(httpPage.getRequest().getExtras());
            List<String> pathList = new ArrayList<>();
            pathList.add(requestUrl);
            pathList.add(moduleUrl);
            requestRecord.tagsCreator().bizTags().addCustomKV(Field_Path, pathList);
            requestRecord.tagsCreator().bizTags().addCustomKV(Tag_Site_Info, moduleName);
            parsedLinks.add(requestRecord);
        }


        // Front-page module lists: article links grouped under each module block.
        List<Selectable> nodes1 = httpPage.getHtml().xpath("//div[@class=\"iframe2s\"]//div[@class=\"rights\"]").nodes();
        for (Selectable node : nodes1) {
            String moduleName = node.xpath(".//div[@class=\"title\"]/strong").get();
            List<String> all = node.xpath(".//div[contains(@class,'news')]//a/@href").all();
            for (String url : all) {
                url = PREFIX + url;
                CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .needWashed(false)
                        .copyResultTags()
                        .build();
                List<String> pathList = new ArrayList<>();
                pathList.add(requestUrl);
                requestRecord.tagsCreator().bizTags().addCustomKV(Field_Path, pathList);
                requestRecord.tagsCreator().bizTags().addCustomKV(Tag_Site_Info, moduleName);
                parsedLinks.add(requestRecord);
            }
        }
    }

    /**
     * Handles an article detail page: extracts the release time (enabling the
     * wash phase only when it falls inside the configured date range) and the
     * article source ("来源：xxx"), stashed as a request extra for washArticle.
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        List<String> all = httpPage.getHtml().xpath("//div[@class=\"date\"]/span/text()").all();
        for (String data : all) {
            if (data.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}")) {
                try {
                    long timeToLong = washTimeToLong(data);
                    boolean dateRange = isDateRange(crawlerRequestRecord, timeToLong);
                    if (dateRange) {
                        crawlerRequestRecord.setNeedWashPage(true);
                        crawlerRequestRecord.setReleaseTime(timeToLong);
                    }
                } catch (Exception e) {
                    // Keep the offending value and the stack trace; a bare getMessage() loses both.
                    log.error("failed to parse release time, data: {}", data, e);
                }
            }

            if (data.contains("来源：")) {
                String source = "";
                if (data.matches("来源：\\S+")) {
                    String[] split = data.split("：");
                    source = split[1];
                } else {
                    // No other formats observed so far; source stays empty.
                }
                crawlerRequestRecord.getHttpRequest().addExtra("source", source);
            }
        }

        // NOTE(review): multi-page articles are not followed yet — TODO when observed.


    }

    /**
     * Wash phase entry: currently only the "article" data type is produced.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();

        if (resultTags.hasDataType(CrawlerEnum.CrawlerDataType.article)) {
            crawlerDataList.addAll(washArticle(crawlerRecord, page));
        }

        return crawlerDataList;
    }

    /**
     * Extracts article fields (title, author, source, content, images, keywords)
     * from a detail page and emits two results: the article itself (Kafka) and a
     * sync record with url/release-time/path metadata (MySQL).
     */
    private List<CrawlerData> washArticle(CrawlerRequestRecord requestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String itemUrl = httpPage.getRequest().getUrl();
        // e.g. ".../202106/abc123.shtml" -> key "abc123"
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));
        String site = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");

        String title = httpPage.getHtml().xpath("//h1/text()").get();
        // "source" was stashed as a request extra by itemUrlRecord.
        String source = (String) httpPage.getRequest().getExtras().get("source");
        String author = httpPage.getHtml().xpath("//div[@class=\"date\"]/span[@class=\"blue\"]/a").get();
        List<String> all = httpPage.getHtml().xpath("//div[@id=\"ncontent\"]//p//text()").all();
        // Local, single-threaded accumulation — StringBuilder avoids StringBuffer's locking.
        StringBuilder contentBf = new StringBuilder();
        for (String s : all) {
            if (StringUtils.isBlank(s)) {
                continue;
            }
            contentBf.append(s).append(" ");
        }

        // Keywords line has the form "关键词：a b c" — split off the label, then by spaces.
        String keyword = httpPage.getHtml().xpath("//div[@class=\"keyword\"]/span/text()").get();
        if (StringUtils.isNotBlank(keyword)) {
            String[] list = keyword.split("：")[1].split(" ");
            requestRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, list);
        }
        List<String> imgs = httpPage.getHtml().xpath("//div[@id=\"artibody\"]//img/@src").all();

        CrawlerData article = CrawlerData.builder()
                .data(requestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                .releaseTime(requestRecord.getReleaseTime())
                .url(itemUrl)
                .addContentKV(Field_Title, title)
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Source, source)
                .addContentKV(Field_Content, contentBf.toString())
                .addContentKV(Field_Images, imgs.toString())
                .flowInPipelineTag(KAFKA)
                .build();
        article.setFilterPipelineResult(true);
        crawlerDataList.add(article);

        CrawlerBusinessTags crawlerBusinessTags = requestRecord.tagsCreator().bizTags();
        CrawlerData sycResult = CrawlerData.builder()
                .data(requestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", requestRecord.getDomain(), site, articleKey))
                .releaseTime(requestRecord.getReleaseTime())
                .url(itemUrl)
                .addContentKV("itemUrl", itemUrl)
                .addContentKV("releaseTimeToLong", String.valueOf(requestRecord.getReleaseTime()))
                .addContentKV(Tag_Site_Info, crawlerBusinessTags.getCategoryTag().getKVTagStrVal(Tag_Site_Info))
                .addContentKV(Field_Path, crawlerBusinessTags.getCategoryTag().getKVTagStrVal(Field_Path))
                .flowInPipelineTag(MYSQL)
                .build();
        sycResult.setFilterPipelineResult(true);
        crawlerDataList.add(sycResult);
        return crawlerDataList;
    }


    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // No post-processing needed for this script.
    }

    /**
     * If the seed record carries a domain-result JSON tag, rebuilds an item-page
     * request from it (url, release time, site info, path) — skipping items whose
     * release time is outside the configured date range.
     */
    private void getSycItemUrlRecord(CrawlerRequestRecord requestRecord, List<CrawlerRecord> crawlerRecords) {
        if (requestRecord.tagsCreator().bizTags().hasKVTag(DOMAIN_RESULT_JSON_RECORD_TAG)) {
            KVTag domainResultJson = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(DOMAIN_RESULT_JSON_RECORD_TAG);
            CrawlerDomainUrls crawlerDomainUrls = JSON.parseObject(String.valueOf(domainResultJson.getVal()), CrawlerDomainUrls.class);
            String url = crawlerDomainUrls.getUrl();
            Json urlJson = new Json(url);
            String itemUrl = urlJson.jsonPath($_type + ".itemUrl").get();
            long releaseTimeToLong = Long.parseLong(urlJson.jsonPath($_type + ".releaseTimeToLong").get());
            if (!isDateRange(requestRecord, releaseTimeToLong)) {
                return;
            }
            // Drop the seed tag so it is not copied onto the derived request.
            requestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(DOMAIN_RESULT_JSON_RECORD_TAG);

            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(requestRecord)
                    .httpUrl(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            String siteInfo = urlJson.jsonPath($_type + "." + Tag_Site_Info).get();
            String filed_path = urlJson.jsonPath($_type + "." + Field_Path).get();
            itemRecord.tagsCreator().bizTags().addCustomKV(Tag_Site_Info, siteInfo);
            itemRecord.tagsCreator().bizTags().addCustomKV(Field_Path, filed_path);
            crawlerRecords.add(itemRecord);
        }
    }

    /**
     * Re-enqueues a failed request, up to 5 attempts (tracked via the
     * {@code ocn_request_retry} business tag). Turn-page requests are rebuilt as
     * turn-page requests, item-page requests as item-page requests.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 5) {
                // Fixed copy-paste: this is the ocn script, not sina.
                log.error("ocn download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        CrawlerRequestRecord crawlerRequestRecord = null;
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    // Append the retry count so the record key stays unique per attempt.
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Returns the first match of {@code regx} in {@code input}, or null when
     * nothing matches.
     */
    private static String washContent(String regx, String input) {
        Pattern compile = Pattern.compile(regx);
        Matcher matcher = compile.matcher(input);
        // Only the first match is ever wanted; a while-loop returning on the first
        // iteration was a disguised if.
        if (matcher.find()) {
            return matcher.group(0);
        }
        return null;
    }

    /**
     * Parses a release-time string in any of the site's observed formats to epoch
     * millis; blank input yields 0.
     *
     * @throws ParseException when the string matches none of the patterns
     */
    private static long washTimeToLong(String time) throws ParseException {
        if (StringUtils.isBlank(time)) {
            return 0;
        }

        time = time.trim();
        return DateUtils.parseDate(time, "yyyy-MM-dd HH:mm:ss", "发表于 yyyy/MM/dd HH:mm", "yyyy年MM月dd日 HH:mm", "yyyy-MM-dd HH:mm").getTime();
    }

    /**
     * Checks whether the given release time falls inside the record's configured
     * date-range filter. Records without a date-range filter always pass.
     *
     * @param crawlerRequestRecord record carrying the filter configuration
     * @param releaseTimeToLong    release time in epoch millis (0 = unknown, never in range)
     * @return true when in range (or no date filter applies)
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord, Long releaseTimeToLong) {
        boolean isRange = false;
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        if (filter == CrawlerEnum.CrawlerRecordFilter.keyOrDateRange || filter == CrawlerEnum.CrawlerRecordFilter.dateRange) {
            List<FilterInfo> filterInfos = crawlerRequestRecord.getFilterInfos();
            Long startTime = null;
            Long endTime = null;
            for (FilterInfo filterInfo : filterInfos) {
                if (filterInfo.getFilter() == CrawlerEnum.CrawlerRecordFilter.dateRange) {
                    long[] dateAllowRange = filterInfo.getDateAllowRange();
                    int hourFromNow = filterInfo.getHourFromNow();
                    if (dateAllowRange != null) {
                        // Explicit [start, end] window takes precedence.
                        startTime = dateAllowRange[0];
                        endTime = dateAllowRange[1];
                    } else if (hourFromNow != 0) {
                        // Rolling window: the last hourFromNow hours up to now.
                        endTime = System.currentTimeMillis();
                        startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
                    }
                }
            }
            if (startTime != null && releaseTimeToLong != 0 && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime) {
                isRange = true;
            }
        } else {
            isRange = true;
        }
        return isRange;
    }

    // NOTE(review): leftover ad-hoc debug main (references a sina URL) — kept for
    // interface compatibility; consider removing once confirmed unused.
    public static void main(String[] args) {
        String s = "https://a.sina.cn/t/author/19656553/";

        String substring = s.substring(s.substring(0, s.length() - 1).lastIndexOf("/") + 1);
        System.out.println(substring);


    }
}
