package com.chance.cc.crawler.development.scripts.kr36;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import com.chance.cc.crawler.development.scripts.baidu.zhishu.BaiDuZhiShuCrawlerScript;
import com.google.gson.JsonObject;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;


/**
 * Crawler script for 36kr.com ("36氪" tech news): starts at the home page,
 * follows the information channel and its news flow, turns pages through the
 * gateway flow API, and washes articles, comments and interaction counts.
 */
public class Kr36CrawlerScript extends CrawlerCommonScript {

    private static Logger log = LoggerFactory.getLogger(Kr36CrawlerScript.class);

    // Crawl seed: the site entry page.
    public static final String beginUrl = "https://36kr.com/";

    // Information-channel news-flow page (HTML).
    public static final String messageUrl = "https://36kr.com/information/web_news";

    // Article detail page pattern: https://36kr.com/p/<numeric id>.
    public static final String articleUrl = "https://36kr.com/p/\\d*";

    // Comment-list API endpoint (POST; JSON body built by getParam()).
    public static final String commentUrl = "https://gateway.36kr.com/api/mis/page/comment/list";

    // Turn-page flow API endpoint (POST).
    // NOTE(review): name breaks lowerCamelCase but is public, so left unchanged.
    public static final String ListUrl = "https://gateway.36kr.com/api/mis/nav/ifm/subNav/flow";

    // Pattern a valid article id must match (digits only).
    public static final String Id = "\\d*";

    // Expected value of the "site" category tag for records handled by this script.
    private static final String information = "information";
    private static final String DOMAIN = "36kr";

    // Business-tag key carrying the retry counter for failed downloads.
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";


    /**
     * @return the crawler domain identifier for this script.
     */
    @Override
    public String domain() {
        // Reuse the DOMAIN constant so this value cannot drift from the tags
        // derived from it (e.g. REQUEST_AGAIN_TAG); previously a duplicated literal.
        return DOMAIN;
    }

    /**
     * Registers every URL pattern this script is allowed to crawl.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {beginUrl, messageUrl, articleUrl, commentUrl, ListUrl};
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }


    /**
     * Decides whether the given record belongs to this script.
     *
     * @param crawlerRequestRecord the record to inspect
     * @return true when the record's "site" category tag equals "information"
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // Compare from the constant side so a record with a missing "site" tag
        // (null) yields false instead of a NullPointerException.
        return information.equalsIgnoreCase(crawlerSite);
    }

    /**
     * Entry point for link extraction: retries failed downloads, schedules the
     * comment request for article pages, and dispatches home/flow/turn-page
     * URLs to the matching parser.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {

        List<CrawlerRequestRecord> records = new ArrayList<CrawlerRequestRecord>();

        boolean downloadFailed = httpPage.getStatusCode() != 200 || !httpPage.isDownloadSuccess();
        if (downloadFailed) {
            log.error("download page url == {} error status is {}", httpPage.getRequest().getUrl(), httpPage.getStatusCode());
            // Download failed: queue the same request again and skip washing this page.
            this.requestAgainCrawlerRecord(records, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return records;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        String articleId = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("articleId");

        // Article pages carry a numeric articleId tag; schedule their comment API call.
        if (StringUtils.isNotEmpty(articleId) && articleId.matches(Id)) {
            records.add(parseCommentLinks(crawlerRequestRecord, httpPage));
        }

        if (requestUrl.matches(beginUrl)) {
            return parseBeginLinks(crawlerRequestRecord, httpPage, records);
        }
        if (requestUrl.matches(messageUrl)) {
            return parseArticleLinks(crawlerRequestRecord, httpPage, records);
        }
        if (requestUrl.matches(ListUrl)) {
            return parseListLinks(crawlerRequestRecord, httpPage, records);
        }

        return records;
    }
    /**
     * Reports whether the last download failed (non-200 status), logging on failure.
     *
     * NOTE(review): this helper is not referenced anywhere in this file.
     *
     * @return true when the page must be treated as a failed download
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String requestedUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        int statusCode = httpPage.getStatusCode();
        boolean failed = statusCode != 200;
        if (failed) {
            log.error("download page {} error, status code is {}", requestedUrl, statusCode);
        }
        return failed;
    }

    /**
     * Parses the JSON response of the turn-page flow API and schedules one
     * article-detail request per item in itemList.
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinksList) {
        // Every entry of data.itemList describes one article.
        List<String> items = httpPage.getJson().jsonPath($_type + ".data.itemList").all();
        for (String item : items) {
            JSONObject itemJson = JSONObject.parseObject(item);
            String itemId = itemJson.getString("itemId");
            String detailUrl = "https://36kr.com/p/" + itemId;
            // templateMaterial is a JSON string holding the article's metadata.
            JSONObject material = JSONObject.parseObject(itemJson.getString("templateMaterial"));
            String publishTime = material.getString("publishTime");

            CrawlerRequestRecord articleRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(detailUrl)
                    .recordKey(detailUrl)
                    .releaseTime(Long.parseLong(publishTime))
                    .resultLabelTag(article)
                    .copyBizTags()
                    .build();
            articleRecord.setNeedWashPage(true);
            articleRecord.tagsCreator().bizTags().addCustomKV("articleId", itemId);
            parseLinksList.add(articleRecord);
        }
        return parseLinksList;
    }

    /**
     * Builds the POST request that fetches the comment list of one article.
     */
    private CrawlerRequestRecord parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String articleId = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("articleId");
        String payload = getParam(articleId);
        HttpRequestBody requestBody = HttpRequestBody.json(payload, "UTF-8");

        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .needParsed(false)
                .releaseTime(System.currentTimeMillis())
                .resultLabelTag(comment)
                .copyBizTags()
                .build();
        commentRecord.setNeedWashPage(true);
        commentRecord.tagsCreator().bizTags().addCustomKV("articleId", articleId);
        // The comment API only answers POST requests carrying a JSON body.
        commentRecord.getHttpRequest().setMethod("post");
        commentRecord.getHttpRequest().setRequestBody(requestBody);
        return commentRecord;
    }


    /**
     * Parses the information-flow HTML page: schedules one article-detail
     * request per flow item, then schedules a turn-page POST to the flow API
     * using the pageCallback token embedded in the page's initialState script.
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinksList) {

        // Each information-flow-item div holds one article teaser.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"information-flow-item\"]").nodes();

        for (Selectable node : nodes) {

            String Url = node.xpath(".//div[@class=\"kr-shadow-content\"]//a[2]/@href").get();
            Url = "https://36kr.com" + Url;
            // The article id is the trailing path segment of /p/<id>.
            String articleId = Url.substring(Url.lastIndexOf("/") + 1);
            if (Url.matches(articleUrl)) {

                String time = node.xpath(".//span[@class=\"kr-flow-bar-time\"]/text()").get();

                // Convert relative timestamps ("N分钟前" etc.) to "yyyy/MM/dd HH:mm".
                time = getTime(time);

                try {
                    long releaseTime = DateUtils.parseDate(time, "yyyy/MM/dd HH:mm").getTime();

                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(Url)
                            .recordKey(Url)
                            .releaseTime(releaseTime)
                            .resultLabelTag(article)
                            .copyBizTags()
                            .build();
                    itemRecord.setNeedWashPage(true);
                    itemRecord.tagsCreator().bizTags().addCustomKV("articleId", articleId);
                    parseLinksList.add(itemRecord);

                } catch (ParseException e) {
                    // NOTE(review): an unparseable timestamp silently drops this item;
                    // consider log.warn instead of printStackTrace.
                    e.printStackTrace();
                }

            }

        }

        // Extract pageCallback (the turn-page cursor) from the embedded
        // "initialState" JSON inside a <script> tag.
        String text1 = httpPage.getHtml().get();
        // +13 skips "initialState" (12 chars) plus one following character,
        // presumably '=' — TODO confirm against the live page markup.
        String text2 = text1.substring(text1.lastIndexOf("initialState") + 13);
        String text3 = text2.substring(0, text2.indexOf("</script>"));
        JSONObject jsonObject1 = JSONObject.parseObject(text3);
        String information = jsonObject1.getString("information");
        JSONObject jsonObject2 = JSONObject.parseObject(information);
        String informationList = jsonObject2.getString("informationList");
        JSONObject jsonObject3 = JSONObject.parseObject(informationList);
        String pageCallback = jsonObject3.getString("pageCallback");

        // Build the POST body expected by the flow API.
        JsonObject post = new JsonObject();
        JsonObject params = new JsonObject();
        post.add("param", params);
        post.addProperty("partner_id", "web");

        params.addProperty("pageCallback", pageCallback);
        params.addProperty("pageEvent", "1");
        params.addProperty("pageSize", "30");
        params.addProperty("platformId", "2");
        params.addProperty("siteId", "1");
        params.addProperty("subnavNick", "web_news");
        params.addProperty("subnavType", "1");
        HttpRequestBody form = HttpRequestBody.json(String.valueOf(post), "UTF-8");


        CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(ListUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .build();

        parseLinksList.add(listRecord);
        listRecord.getHttpRequest().setMethod("post");

        listRecord.getHttpRequest().setRequestBody(form);

        return parseLinksList;
    }

    /**
     * Serializes the JSON body required by the comment-list POST endpoint
     * for the given article id.
     */
    private String getParam(String articleId) {
        JsonObject body = new JsonObject();
        JsonObject params = new JsonObject();
        body.add("param", params);
        body.addProperty("partner_id", "web");

        params.addProperty("itemId", articleId);
        params.addProperty("itemType", "10");
        params.addProperty("pageEvent", "0");
        params.addProperty("pageSize", "20");
        params.addProperty("platformId", "2");
        params.addProperty("siteId", "1");
        return body.toString();
    }

    /**
     * From the home page, follows the "information" navigation link.
     */
    private List<CrawlerRequestRecord> parseBeginLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinksList) {
        String href = httpPage.getHtml().xpath("//a[@id=\"information\"]/@href").get();
        String channelUrl = "https://36kr.com" + href;

        CrawlerRequestRecord channelRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(channelUrl)
                .recordKey(channelUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .build();
        parseLinksList.add(channelRecord);
        return parseLinksList;
    }

    /**
     * Dispatches washing by the record's result data type.
     *
     * @return washed data, or null when the record is neither comment nor article
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)) {
            return washComment(crawlerRequestRecord, httpPage);
        } else if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            return washArticle(crawlerRequestRecord, httpPage);
        } else {
            // NOTE(review): callers apparently tolerate a null result; kept as-is.
            return null;
        }
    }

    /**
     * Washes the comment-list JSON of one article into CrawlerData entries:
     * one entry per top-level comment (parented to the article) and one per
     * sub comment / reply (parented to its top-level comment).
     *
     * @return the washed comment data (possibly empty)
     */
    private List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();

        String articleId = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("articleId");

        List<String> all = httpPage.getJson().jsonPath($_type + ".data.commentList").all();

        for (String s : all) {
            JSONObject jsonObject = JSONObject.parseObject(s);
            String userId = jsonObject.getString("userId");
            String userNick = jsonObject.getString("userNick");
            String content = jsonObject.getString("content");
            String publishTime = jsonObject.getString("publishTime");
            String likeCount = jsonObject.getString("statPraise");
            long time = Long.parseLong(publishTime);

            // Top-level comment: parent is the article itself.
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), userId))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleId))
                    .resultLabelTag(comment)
                    .addContentKV(Field_Author, userNick)
                    .addContentKV(Field_Author_Id, userId)
                    .addContentKV(Field_Content, content.trim())
                    .addContentKV(Field_I_Likes, likeCount)
                    .releaseTime(time)
                    .build();
            crawlerDataList.add(crawlerData);

            // Sub comments (replies): parent is the top-level comment above.
            String subCommentList = jsonObject.getString("subCommentList");
            if (StringUtils.isNotEmpty(subCommentList)) {
                // BUGFIX: the old code parsed the array into a raw List and cast
                // each element to String, but fastjson materializes object
                // elements as JSONObject, so the cast threw ClassCastException.
                // Parse the array directly and read elements as JSONObject.
                JSONArray subComments = JSONObject.parseArray(subCommentList);
                for (int i = 0; i < subComments.size(); i++) {
                    JSONObject commentJson = subComments.getJSONObject(i);
                    String commentUserId = commentJson.getString("userId");
                    String commentUserNick = commentJson.getString("userNick");
                    String commentContent = commentJson.getString("content");
                    long commentTime = Long.parseLong(commentJson.getString("publishTime"));
                    String commentLikeCount = commentJson.getString("statPraise");

                    CrawlerData commentData = CrawlerData.builder()
                            .data(crawlerRequestRecord, httpPage)
                            .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentUserId))
                            .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), userId))
                            .resultLabelTag(comment)
                            .addContentKV(Field_Author, commentUserNick)
                            .addContentKV(Field_Author_Id, commentUserId)
                            .addContentKV(Field_Content, commentContent.trim())
                            .addContentKV(Field_I_Likes, commentLikeCount)
                            .releaseTime(commentTime)
                            .build();
                    crawlerDataList.add(commentData);
                }
            }
        }

        return crawlerDataList;
    }

    /**
     * Washes an article detail page into two CrawlerData entries: the article
     * itself (title, author, body text, images) and its interaction counts
     * (likes, comment count) parented to the article.
     *
     * @return the washed data (empty when the URL is not an article page)
     */
    private List<CrawlerData> washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {

        List<CrawlerData> crawlerDataList = new ArrayList<>();

        if (httpPage.getRequest().getUrl().matches(articleUrl)) {
            // e.g. https://36kr.com/p/1431712293543177
            String itemUrl = httpPage.getRequest().getUrl();
            // The article id is the trailing path segment.
            String articleId = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);

            String title = httpPage.getHtml().xpath("//h1[@class=\"article-title margin-bottom-20 common-width\"]").get();

            String author = httpPage.getHtml().xpath("//a[@class=\"title-icon-item item-a\"]").get();

            String authorId = httpPage.getHtml().xpath("//a[@class=\"title-icon-item item-a\"]/@href").get();

            // Author id is the trailing segment of the author profile href.
            String author_id = authorId.substring(authorId.lastIndexOf("/") + 1);

            // The article summary, prepended to the body text below.
            String text1 = httpPage.getHtml().xpath("//div[@class=\"summary\"]/text()").get();

            String articleTime = httpPage.getHtml().xpath("//span[@class=\"title-icon-item item-time\"]/text()").get();
            articleTime = getTime(articleTime);

            // An h2-only xpath was tried first but only captured headings:
            // List<String> h2Text = httpPage.getHtml().xpath("//div[@class=\"common-width content articleDetailContent kr-rich-text-wrapper\"]//h2//text()").all();

            // Selecting all text under the content wrapper (parent of both p and h2)
            // captures the full body text.
            List<String> text2 = httpPage.getHtml().xpath("//div[@class=\"common-width content articleDetailContent kr-rich-text-wrapper\"]//text()").all();


            StringBuffer contents1 = new StringBuffer();
            for (String s : text2) {
                contents1.append(s);
            }


            // Summary + body, with quote entities and whitespace normalized.
            // NOTE(review): replacing "  " collapses double spaces only once per
            // occurrence and may glue words together — confirm this is intended.
            String text = text1 + contents1;
            text = text.replaceAll("&amp;ldquo;", "“");
            text = text.replaceAll("&amp;rdquo;", "”");
            text = text.replaceAll("\\n", "");
            text = text.replaceAll("  ", "");

            // Collect article images; "/0X1" is appended to each src
            // (presumably a size/format suffix of the 36kr CDN — verify).
            List<String> pictureList = httpPage.getHtml().xpath("//div[@class=\"common-width margin-bottom-20\"]//img/@src").all();

            StringBuffer pictures = new StringBuffer();
            for (String picture : pictureList) {
                if (picture.contains("https:")) {
                    pictures.append(picture).append("/0X1");
                } else {
                    // Protocol-relative src: prefix the scheme.
                    pictures.append("https:").append(picture).append("/0X1");
                }
            }
            if (pictureList.size() == 0) {
                pictures.append(" ");
            }

            // Wash the article entry.
            try {
                CrawlerData articleData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleId))
                        .resultLabelTag(article)
                        .url(itemUrl)
                        .releaseTime(DateUtils.parseDate(articleTime, "yyyy/MM/dd HH:mm").getTime())
                        .addContentKV(AICCommonField.Field_Content, text.trim())
                        .addContentKV(AICCommonField.Field_Title, title.trim())
                        .addContentKV(AICCommonField.Field_Author, author.trim())
                        .addContentKV(Field_Author_Id, author_id.trim())
                        .addContentKV(AICCommonField.Field_Images, pictures.toString().trim())
                        .build();
                crawlerDataList.add(articleData);
            } catch (ParseException e) {
                // NOTE(review): an unparseable timestamp silently drops the article
                // entry (interaction data below is still emitted); consider log.warn.
                e.printStackTrace();
            }

            // Like count.
            String likesCount = httpPage.getHtml().xpath("//div[@class=\"thumbNum normalColor\"]").get();
            // Comment count.
            String commentCount = httpPage.getHtml().xpath("//b[@class=\"comment-count item-count-common\"]").get();

            // Wash the interaction-count entry, parented to the article.
            CrawlerData interactionData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleId))
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleId))
                    .resultLabelTag(interaction)
                    .addContentKV(AICCommonField.Field_I_Comments, commentCount)
                    .addContentKV(AICCommonField.Field_I_Likes, likesCount)
                    .releaseTime(System.currentTimeMillis())
                    .build();
            crawlerDataList.add(interactionData);

        }

        return crawlerDataList;
    }

    /**
     * Converts 36kr relative timestamps — "N秒前" / "N分钟前" / "N小时前"
     * (N seconds / minutes / hours ago) — into an absolute
     * "yyyy/MM/dd HH:mm" string; any other input is returned unchanged.
     *
     * @param releaseTime the raw timestamp text scraped from the page
     * @return an absolute timestamp string, or the input when no pattern matches
     */
    private String getTime(String releaseTime) {
        long offsetMillis = -1;
        if (releaseTime.contains("秒前")) {
            offsetMillis = Long.parseLong(releaseTime.split("秒")[0]) * 1000L;
        } else if (releaseTime.contains("分钟前")) {
            offsetMillis = Long.parseLong(releaseTime.substring(0, releaseTime.lastIndexOf("分钟"))) * 60L * 1000L;
        } else if (releaseTime.contains("小时前")) {
            // Long arithmetic: the original int multiplication (n * 60 * 60 * 1000)
            // could overflow for large hour values.
            offsetMillis = Long.parseLong(releaseTime.substring(0, releaseTime.lastIndexOf("小时"))) * 60L * 60L * 1000L;
        }
        if (offsetMillis >= 0) {
            // SimpleDateFormat is not thread-safe, so a fresh instance per call.
            SimpleDateFormat format = new SimpleDateFormat("yyyy/MM/dd HH:mm");
            releaseTime = format.format(new Date(System.currentTimeMillis() - offsetMillis));
        }
        return releaseTime;
    }

    /**
     * Post-execution hook; intentionally a no-op for this script.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-schedules a failed download, up to 10 attempts. The attempt counter is
     * carried in the record's business tags under REQUEST_AGAIN_TAG, and the
     * retry record's key is suffixed with it so deduplication does not swallow
     * the retry.
     *
     * @param crawlerRequestRecords collector the retry record is appended to
     * @param crawlerRecord         the record whose download failed
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 10) {
                // BUGFIX: the message wrongly said "pcauto" (copied from another
                // script); log this script's domain instead.
                log.error("{} download page the number of retries exceeds the limit" +
                        ",request url {}", DOMAIN, crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        CrawlerRequestRecord crawlerRequestRecord = null;
        // Records without the turn_page_item_request label are retried as
        // turn-page requests (with notFilterRecord); item-page requests keep
        // their request type.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // Carry over washing/parsing flags, headers and extras, then bump the counter.
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }


}
