package com.chance.cc.crawler.development.scripts.cctv.search;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;

public class CCTVSearchCrawlerScript extends CrawlerCommonScript {

    // final: the logger is a per-class singleton and must never be reassigned.
    private static final Logger log = LoggerFactory.getLogger(CCTVSearchCrawlerScript.class);

    /** Search entry page: https://search.cctv.com/search.php?qtext=...&type=web */
    private static final String Start_URL = "https://search.cctv.com/search.php\\?qtext=\\S*&type=web";

    /** Paginated, date-sorted search-result list pages. */
    private static final String Search_URL = "https://search.cctv.com/search.php\\?qtext=\\S*&sort=date&type=web&vtime=&datepid=1&channel=&page=\\d*";

    /** Regular article detail pages, e.g. https://news.cctv.com/2021/10/01/ARTI....shtml */
    private static final String Article_URL = "https://\\S*.cctv.com/\\d{4}/\\d{2}/\\d{2}/\\S*.shtml";

    // Ad-like special pages, e.g. https://tv.cctv.com/special/zgctjr/chongyang/index.shtml
    private static final String exceptionUrl = "https://tv.cctv.com/special/zgctjr/\\S*/index.shtml";

    /** XML descriptor fetched instead of HTML for picture-gallery ("图集") items. */
    private static final String pictureUrl = "https://\\S*.cctv.com/\\d{4}/\\d{2}/\\d{2}/\\S*.xml";

    /** Value of the "site" biz tag that routes records to this script. */
    public static final String SITE = "search";

    /**
     * The crawler domain this script belongs to.
     *
     * @return the fixed domain key {@code "cctv"}
     */
    @Override
    public String domain() {
        return "cctv";
    }


    /**
     * Accepts only records whose business "site" tag equals {@link #SITE}.
     *
     * @param crawlerRequestRecord the candidate record
     * @return true when this script should handle the record
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        // SITE.equals(site) is null-safe: a record with no "site" tag simply fails the check.
        return SITE.equals(site);
    }

    /**
     * Registers every URL pattern this script is allowed to fetch:
     * the search entry page, paginated result lists, article detail pages,
     * ad-like special pages and picture-gallery XML descriptors.
     */
    @Override
    public void initUrlRegulars() {
        for (String regular : new String[]{Start_URL, Search_URL, Article_URL, exceptionUrl, pictureUrl}) {
            addUrlRegular(regular);
        }
    }

    /**
     * Builds the initial search requests from the supporting keyword source records.
     * Only support records whose URL contains "keys" carry keyword payloads.
     *
     * @return the turn-page records created from the discovered keywords
     */
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> records = new ArrayList<>();
        for (CrawlerRequestRecord support : supportSourceRecords) {
            if (support.getHttpRequest().getUrl().contains("keys")) {
                initKeyword(crawlerRequestRecord, support, records);
            }
        }
        return records;
    }

    /**
     * Parses the downloaded keyword source page and, for every keyword found,
     * appends one search request built from the "url" template stored in the
     * originating request's extras.
     *
     * @param crawlerRequestRecord the originating request carrying the URL template
     * @param supportSourceRecord  the downloaded keyword source record
     * @param crawlerRecords       output list the new turn-page records are appended to
     */
    private void initKeyword(CrawlerRequestRecord crawlerRequestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
        String searchSourceUrl = (String) extras.get("url");
        if (StringUtils.isBlank(searchSourceUrl)) {
            log.error("search kw source url can not null!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");
            try {
                // The template contains a single %s placeholder for the URL-encoded keyword.
                String url = String.format(searchSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();

                // Stash the raw keyword so downstream stages can recover it.
                turnRecord.getHttpRequest().addExtra("keyword", keyword);
                crawlerRecords.add(turnRecord);
            } catch (UnsupportedEncodingException e) {
                // Log with the cause; e.getMessage() alone loses the stack trace.
                log.error("encode keyword [{}] failed", keyword, e);
            }
        }
    }

    /**
     * Routes a downloaded page: failed downloads are re-queued (unfiltered,
     * wash skipped) for retry; search-result list pages are parsed for item
     * links and the next-page link.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        String url = crawlerRequestRecord.getHttpRequest().getUrl();
        if (doHttpPageCheck(crawlerRequestRecord, httpPage)) {
            // Download failed: push the record back for a retry and skip washing.
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            parsedLinks.add(crawlerRequestRecord);
            return parsedLinks;
        }

        if (url.matches(Search_URL)) {
            parseListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }

        return parsedLinks;
    }

    /**
     * Checks whether the download failed.
     *
     * @return true when the response status is not 200 (an error is logged),
     *         false when the page downloaded successfully
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        int statusCode = httpPage.getStatusCode();
        if (statusCode == 200) {
            return false;
        }
        log.error("download page {} error, status code is {}",
                crawlerRequestRecord.getHttpRequest().getUrl(), statusCode);
        return true;
    }


    /**
     * Parses one search-result list page: emits an item request per result row
     * (picture galleries are redirected to their XML descriptor) and, while the
     * oldest row on the page is newer than 7 days, a request for the next page.
     */
    private void parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"tuwenjg\"]//ul/li").nodes();
        // Publish time of the LAST row on the page; drives the page-turn decision below.
        String date = httpPage.getHtml().xpath("//div[@class=\"tuwenjg\"]//ul/li[" + nodes.size() + "]//span[@class=\"tim\"]").get();
        date = date.substring(date.indexOf("发布时间：") + 5);
        long pageTime = 0;
        try {
            pageTime = DateUtils.parseDate(date, "yyyy-MM-dd HH:mm:ss").getTime();
        } catch (ParseException e) {
            log.error("parse list page date [{}] failed", date, e);
        }
        for (Selectable node : nodes) {
            String s = node.xpath(".//span[@class=\"tim\"]").get();
            // Skip the 4-char "发布时间" label plus the full-width colon.
            String time = s.substring(s.indexOf("发布时间") + 5);

            String itemUrl = node.xpath(".//h3[@class=\"tit\"]/span/@lanmu1").get();
            try {
                long releaseTime = DateUtils.parseDate(time, "yyyy-MM-dd HH:mm:ss").getTime();
                // NOTE(review): label/source are read via httpPage (whole page), not via
                // `node` — presumably this matches the first occurrence on the page.
                // Confirm per-row gallery detection is not intended here.
                String label = httpPage.getHtml().xpath(".//div[@class=\"pic_01\"]/text()").get();
                if ("图集".equals(label)) {
                    // Picture gallery: fetch the companion XML descriptor instead of the HTML page.
                    String source = httpPage.getHtml().xpath(".//span[@class=\"src\"]/text()").get().split("：")[1];

                    String pictureText = itemUrl.substring(itemUrl.indexOf("//") + 2, itemUrl.lastIndexOf("."));
                    String pictureTextUrl = "https://" + pictureText + ".xml";
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(pictureTextUrl)
                            .httpUrl(pictureTextUrl)
                            .releaseTime(releaseTime) // timestamp used for comparison
                            .resultLabelTag(article)
                            .copyBizTags()
                            .build();
                    // "another" marks gallery items so washPage() routes them to washAnotherArticle().
                    itemRecord.tagsCreator().bizTags().addCustomKV("another", "another");
                    itemRecord.tagsCreator().bizTags().addCustomKV("time", releaseTime);
                    itemRecord.tagsCreator().bizTags().addCustomKV("source", source);
                    parsedLinks.add(itemRecord);
                } else {
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(itemUrl)
                            .httpUrl(itemUrl)
                            .releaseTime(releaseTime) // timestamp used for comparison
                            .resultLabelTag(article)
                            .copyBizTags()
                            .build();
                    itemRecord.tagsCreator().bizTags().addCustomKV("time", releaseTime);
                    parsedLinks.add(itemRecord);
                }
            } catch (ParseException e) {
                log.error("parse item time [{}] of {} failed", time, itemUrl, e);
            }
        }

        // Next-page link: keep paging only while the page still has items newer than 7 days.
        String url = httpPage.getRequest().getUrl();
        String page = url.substring(url.lastIndexOf("page=") + 5);
        long sevenDaysMs = 7L * 24 * 60 * 60 * 1000;
        if (pageTime > System.currentTimeMillis() - sevenDaysMs) {
            String nextPage = String.valueOf(Integer.parseInt(page) + 1);
            String nextPageUrl = url.substring(0, url.lastIndexOf("=") + 1) + nextPage;

            CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(listRecord);
        }
    }

    /**
     * Dispatches washing by result type: gallery items (biz-tagged "another")
     * go to {@link #washAnotherArticle}, regular article URLs to
     * {@link #washArticle}. Returns null when the record is not washable here.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String label = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("another");
        String url = crawlerRequestRecord.getHttpRequest().getUrl();
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            if ("another".equals(label)) {
                return washAnotherArticle(crawlerRequestRecord, httpPage);
            }
            if (url.matches(Article_URL)) {
                return washArticle(crawlerRequestRecord, httpPage);
            }
        }
        return null;
    }

    /**
     * Washes a picture-gallery XML page into a single article record.
     * Release time and author were stashed on the record's biz tags by
     * {@link #parseListLinks}.
     */
    private List<CrawlerData> washAnotherArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();

        String time = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("time");
        String author = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("source");
        String itemUrl = httpPage.getRequest().getUrl();
        // File name (without extension) uniquely identifies the article.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        String title = httpPage.getHtml().xpath("//ul/@title").get();

        List<String> textList = httpPage.getHtml().xpath("//ul/li/text()").all();
        // StringBuilder: no synchronization needed for a method-local buffer.
        StringBuilder contents = new StringBuilder();
        for (String s : textList) {
            // NOTE(review): this skips a segment only when it equals the WHOLE text
            // accumulated so far (i.e. dedups an immediately repeated first segment) —
            // confirm that is the intended de-duplication.
            if (!contents.toString().equals(s)) {
                contents.append(s);
            }
        }
        List<String> photoUrlList = httpPage.getHtml().xpath("//ul/li/@photourl").all();
        StringBuilder photos = new StringBuilder();
        for (String picture : photoUrlList) {
            // Protocol-relative URLs need the scheme; "/0X1" selects the original size.
            if (picture.contains("https:")) {
                photos.append(picture).append("/0X1");
            } else {
                photos.append("https:").append(picture).append("/0X1");
            }
        }

        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .url(itemUrl)
                .releaseTime(Long.parseLong(time))
                .addContentKV(AICCommonField.Field_Content, contents.toString().trim())
                // trimToEmpty guards against a missing @title / "source" tag (NPE before).
                .addContentKV(AICCommonField.Field_Title, StringUtils.trimToEmpty(title))
                .addContentKV(AICCommonField.Field_Author, StringUtils.trimToEmpty(author))
                .addContentKV(AICCommonField.Field_Images, photos.toString())
                .build();
        crawlerDataList.add(crawlerData);

        return crawlerDataList;
    }

    /**
     * Washes a regular article page into an article record plus an interaction
     * record carrying the like count. CCTV channel layouts differ, so title,
     * author and content each try a primary and a fallback XPath.
     */
    private List<CrawlerData> washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();

        String itemUrl = httpPage.getRequest().getUrl();
        // File name (without extension) uniquely identifies the article.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        String title = httpPage.getHtml().xpath("//div[@id=\"title_area\"]/h1/text()").get();
        if (StringUtils.isEmpty(title)) {
            // Older template keeps the headline under cnt_bd.
            title = httpPage.getHtml().xpath("//div[@class=\"cnt_bd\"]/h1/text()").get();
        }

        String s = httpPage.getHtml().xpath("//div[@class=\"info1\"]//text()").get();
        String author = "";
        if (StringUtils.isNotEmpty(s)) {
            int sep = s.indexOf("|");
            // Guard: some pages omit the "|" separator; use the whole string then
            // (substring(0, -1) previously threw StringIndexOutOfBoundsException).
            author = sep >= 0 ? s.substring(0, sep) : s;
        } else {
            String s1 = httpPage.getHtml().xpath("//span[@class=\"info\"]//i/text()").get();
            if (StringUtils.isNotEmpty(s1)) {
                if (s1.contains("年")) {
                    // Strip the "来源：" prefix and the trailing "yyyy年..." date part —
                    // NOTE(review): the fixed -5 offset assumes a 4-digit year plus colon; confirm.
                    author = s1.substring(s1.indexOf("：") + 1, s1.indexOf("年") - 5);
                } else {
                    author = s1;
                }
            } else {
                author = httpPage.getHtml().xpath("//div[@class=\"info\"]/span[2]").get();
            }
        }

        // Release time stashed on the biz tags by parseListLinks().
        String time = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("time");

        List<String> allText = httpPage.getHtml().xpath("//div[@id=\"content_area\"]//p/text()").all();
        // StringBuilder: no synchronization needed for a method-local buffer.
        StringBuilder contents = new StringBuilder();
        if (!allText.isEmpty()) {
            for (String paragraph : allText) {
                contents.append(paragraph);
            }
        } else {
            // Fallback for the older cnt_bd template.
            for (String paragraph : httpPage.getHtml().xpath("//div[@class=\"cnt_bd\"]//p/text()").all()) {
                contents.append(paragraph);
            }
        }

        List<String> allPicture = httpPage.getHtml().xpath("//div[@id=\"content_area\"]/p/img/@src").all();
        StringBuilder pictures = new StringBuilder();
        for (String picture : allPicture) {
            // Protocol-relative image URLs need the scheme; "/0X1" selects the original size.
            if (picture.contains("https:")) {
                pictures.append(picture).append("/0X1");
            } else {
                pictures.append("https:").append(picture).append("/0X1");
            }
        }

        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .url(itemUrl)
                .releaseTime(Long.parseLong(time))
                .addContentKV(AICCommonField.Field_Content, contents.toString().trim())
                // trimToEmpty: both title XPaths can miss (NPE on title.trim() before).
                .addContentKV(AICCommonField.Field_Title, StringUtils.trimToEmpty(title))
                .addContentKV(AICCommonField.Field_Author, StringUtils.trimToEmpty(author))
                .addContentKV(AICCommonField.Field_Images, pictures.toString())
                .build();
        crawlerDataList.add(crawlerData);

        // Like count -> separate interaction record linked to the article via parentId.
        String likesNumber = StringUtils.defaultString(httpPage.getHtml().xpath("//div[@id=\"zanNum\"]/text()").get());
        CrawlerData interactionData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .addContentKV(AICCommonField.Field_I_Likes, likesNumber)
                .releaseTime(System.currentTimeMillis())
                .build();
        crawlerDataList.add(interactionData);

        return crawlerDataList;
    }


    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // Intentionally a no-op: this script requires no post-execution cleanup.
    }

}
