package com.chance.cc.crawler.development.scripts.pcauto.video;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.*;

/**
 * @author lt
 * @version 1.0
 * @date 2021-01-15 13:34:05
 * @email okprog@sina.com
 */
/**
 * Crawler script for the pcauto video channel (pcauto.pcvideo.com.cn).
 *
 * <p>Crawl flow: index page -> channel/tag pages -> paged video lists -> video
 * detail pages. For each video it also fetches the comment count (internal
 * download) and the paged comment list from cmt.pcauto.com.cn, emitting
 * article, interaction and comment {@link CrawlerData} records.
 */
public class PcAutoVideoCrawlerScript extends CrawlerCommonScript {

    // Loggers are conventionally static final: one shared, never-reassigned instance per class.
    private static final Logger logger = LoggerFactory.getLogger(PcAutoVideoCrawlerScript.class);

    public static final String indexRegex = "https?://pcauto\\.pcvideo\\.com\\.cn/";
    public static final String tagUrlRegex = "https?://pcauto\\.pcvideo\\.com\\.cn/c\\d*/|https?://pcauto\\.pcvideo\\.com\\.cn/org/all/";
    public static final String listUrlRegex = "https?://pcauto\\.pcvideo\\.com\\.cn/c\\d*/p\\d*/|https?://pcauto\\.pcvideo\\.com\\.cn/org/all/p\\d*/";
    // The '.' before "html" is escaped; previously it matched any character.
    public static final String videoRegex = "https?://pcauto\\.pcvideo\\.com\\.cn/video-\\d*\\.html";
    private static final String cmtCountUrlRegex = "https?://cmt\\.pcauto\\.com\\.cn/action/topic/get_data\\.jsp\\?url=\\S*";
    private static final String commentRegex = "https?://cmt\\.pcauto\\.com\\.cn/action/comment/list_good_json\\.jsp\\S*";

    public static final String cmtCountUrlFormat = "https://cmt.pcauto.com.cn/action/topic/get_data.jsp?url=%s";
    public static final String commentUrlFormat = "https://cmt.pcauto.com.cn/action/comment/list_good_json.jsp?&urlHandle=1&url=%s&pageNo=%s&pageSize=30&voteId=0";

    private static final String scriptSite = "video";

    // Compiled once instead of on every call. 13 digits = a millisecond epoch timestamp.
    private static final Pattern TIMESTAMP_PATTERN = Pattern.compile("\\d{13}");
    // Captures the serialData JSON object literal in group 1. The old code re-split
    // the whole match on the literal text "serialData =", which threw
    // ArrayIndexOutOfBoundsException whenever the spacing around '=' differed from
    // that exact form (the regex itself allows any whitespace via \s*).
    private static final Pattern SERIAL_DATA_PATTERN = Pattern.compile("var\\s*serialData\\s*=\\s*(\\{[^;]*\\})");

    @Override
    public String domain() {
        return "pcauto";
    }

    @Override
    public void initUrlRegulars() {
        addUrlRegular(indexRegex);
        addUrlRegular(tagUrlRegex);
        addUrlRegular(listUrlRegex);
        addUrlRegular(videoRegex);
        addUrlRegular(commentRegex);
    }

    /**
     * Accepts only records tagged with site "video".
     * Comparison is anchored on the constant so a missing "site" tag yields
     * {@code false} instead of a NullPointerException.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /**
     * Dispatches a downloaded page to the matching link parser based on which
     * URL pattern the request matches.
     *
     * <p>Download failures (blank body, transport error, or a status other than
     * 200/404) re-queue the original record for retry; a 404 is logged and
     * dropped.
     *
     * @return follow-up request records; never {@code null} (an unmatched URL
     *         yields an empty list).
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        int statusCode = httpPage.getStatusCode();
        String lastRequestUrl = lastRequest.getUrl();
        if (StringUtils.isBlank(httpPage.getRawText()) || !httpPage.isDownloadSuccess() || (statusCode != 200 && statusCode != 404)) {
            // Retry: push the same record back, skip washing, and drop the
            // filter tag so the retry is not deduplicated away.
            parsedLinks.add(crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            return parsedLinks;
        }
        if (statusCode == 404) {
            logger.error("页面不存在：" + statusCode);
            return parsedLinks;
        }
        if (lastRequestUrl.matches(indexRegex)) {
            return parseIndexLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(tagUrlRegex) || lastRequestUrl.matches(listUrlRegex)) {
            return parseListLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (lastRequestUrl.matches(videoRegex)) {
            return parseArticleLinks(crawlerRequestRecord, parsedLinks, lastRequest);
        }
        if (lastRequestUrl.matches(commentRegex)) {
            return parseCommentLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        // Never return a null collection; an unmatched URL simply produces no links.
        return parsedLinks;
    }

    /**
     * Emits the next comment-list page request while the JSON response reports
     * more pages ({@code pageNo < pageCount}). The article URL is carried
     * forward through the request extras.
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        int pageCount = pageObj.getIntValue("pageCount");
        int pageNo = pageObj.getIntValue("pageNo");
        if (pageNo < pageCount) {
            int nextPageNo = pageNo + 1;
            String articleUrl = (String) extras.get("articleUrl");
            String commentUrl = String.format(commentUrlFormat, articleUrl, nextPageNo);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .recordKey(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .needWashed(true)
                    .needParsed(true)
                    .resultLabelTag(comment)
                    .resultLabelTag(interaction)
                    .build();
            Map<String, Object> cmtExtras = copyExtras(extras);
            commentRecord.getHttpRequest().setExtras(cmtExtras);
            parsedLinks.add(commentRecord);
        }
        return parsedLinks;
    }

    /**
     * For a video detail page, queues (1) an internal download of the comment
     * count endpoint and (2) the first comment-list page.
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> parsedLinks, HttpRequest lastRequest) {
        Map<String, Object> listExtras = lastRequest.getExtras();
        // Internally download the comment count for this video.
        String cmtCountUrl = String.format(cmtCountUrlFormat, lastRequest.getUrl());
        CrawlerRequestRecord cmtCountRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(cmtCountUrl)
                .recordKey(cmtCountUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        parsedLinks.add(cmtCountRecord);

        // First page of the comment list; later pages are chained by parseCommentLinks.
        String commentUrl = String.format(commentUrlFormat, crawlerRequestRecord.getHttpRequest().getUrl(), 1);
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .recordKey(commentUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .needParsed(true)
                .resultLabelTag(comment)
                .resultLabelTag(interaction)
                .build();
        Map<String, Object> cmtExtras = copyExtras(listExtras);
        commentRecord.getHttpRequest().setExtras(cmtExtras);
        parsedLinks.add(commentRecord);
        return parsedLinks;
    }


    /**
     * Parses a list page: queues the next list page (when a "next" anchor
     * exists) and one item request per video entry. The release time of each
     * item is recovered from the millisecond timestamp embedded in its
     * thumbnail URL.
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String nextPageUrl = httpPage.getHtml().xpath("//a[@class=\"next\"]/@href").get();
        // On the last page there is no "next" anchor; the old code NPEd on startsWith.
        if (StringUtils.isNotBlank(nextPageUrl)) {
            if (nextPageUrl.startsWith("//")) {
                // Protocol-relative href.
                nextPageUrl = "https:" + nextPageUrl;
            }
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(nextPageRecord);
        }

        List<Selectable> itemNodes = httpPage.getHtml().xpath("//ul[@class=\"list clearfix\"]/li").nodes();
        for (Selectable itemNode : itemNodes) {
            String itemUrl = itemNode.xpath("./a/@href").get();
            if (itemUrl != null && itemUrl.startsWith("//")) {
                itemUrl = "https:" + itemUrl;
            }
            String imgUrl = itemNode.xpath("./a/img/@src").get();
            // The thumbnail URL embeds the publish time as an 11+ digit epoch value.
            if (imgUrl != null && imgUrl.matches("\\S*\\d{11}\\S*")) {
                long releaseTime = washTime(imgUrl);
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(releaseTime)
                        .copyBizTags()
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build();
                Map<String, Object> extras = new HashMap<>();
                extras.put("listUrl", crawlerRequestRecord.getHttpRequest().getUrl());
                extras.put("articleUrl", itemUrl);
                itemRecord.getHttpRequest().setExtras(extras);
                parsedLinks.add(itemRecord);
            } else {
                logger.error("can not parse date from : " + imgUrl);
            }
        }
        return parsedLinks;
    }

    /**
     * Parses the index page navigation bar into channel (tag) requests. The
     * "c0" (all) channel is rewritten to its "org/all" form and the "c6"
     * channel is skipped; each tag record carries its nav text as path tag.
     */
    private List<CrawlerRequestRecord> parseIndexLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        List<Selectable> tagsNodes = httpPage.getHtml().xpath("//div[@class=\"nav\"]/ul/li/a").nodes();
        for (Selectable tagsNode : tagsNodes) {
            String itemUrl = tagsNode.xpath("./@href").get();
            if (itemUrl == null) {
                continue;
            }
            if (itemUrl.startsWith("//")) {
                itemUrl = "https:" + itemUrl;
            }
            if (itemUrl.matches(tagUrlRegex)) {
                if (itemUrl.contains("c0")) {
                    itemUrl = itemUrl.replace("c0", "org/all");
                }
                if (itemUrl.contains("c6")) {
                    continue;
                }
                String tagName = tagsNode.xpath("./text()").get();
                List<String> path = new ArrayList<>();
                path.add(tagName);
                CrawlerRequestRecord tagRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .build();
                tagRecord.tagsCreator().bizTags().addCustomKV(Field_Path, path);
                parsedLinks.add(tagRecord);
            }
        }
        return parsedLinks;
    }

    /**
     * Extracts the first 13-digit millisecond timestamp from an image URL.
     *
     * @return the timestamp, or 0 when none is present
     */
    private long washTime(String imgUrl) {
        Matcher mtTime = TIMESTAMP_PATTERN.matcher(imgUrl);
        if (mtTime.find()) {
            return Long.parseLong(mtTime.group());
        }
        return 0;
    }

    /**
     * After the internal comment-count download of a video page completes,
     * copies the "total" field of the JSON response into the request extras
     * under "comments" for {@link #washInteraction}.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String lastRequestUrl = httpRequest.getUrl();
        if (lastRequestUrl.matches(videoRegex)) {
            for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
                String internalUrl = internalDownloadRecord.getHttpRequest().getUrl();
                if (internalUrl.matches(cmtCountUrlRegex)) {
                    HttpPage internalDownloadPage = internalDownloadRecord.getInternalDownloadPage();
                    JSONObject pageObj = JSONObject.parseObject(internalDownloadPage.getRawText());
                    String comments = pageObj.getString("total");
                    extras.put("comments", comments);
                }
            }
        }
    }

    /**
     * Dispatches washing by the result data types tagged on the record; a
     * record may carry several types and contribute to several result lists.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)) {
            crawlerDataList.addAll(washArticle(crawlerRequestRecord, httpPage));
        }

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            crawlerDataList.addAll(washInteraction(crawlerRequestRecord, httpPage));
        }

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)) {
            crawlerDataList.addAll(washComment(crawlerRequestRecord, httpPage));
        }

        return crawlerDataList;
    }

    /**
     * Washes a video detail page into an article record: title, description
     * text, author/follows (original-content videos only), media URL, topic
     * type tags, and the related car-series info embedded as a
     * {@code serialData} JS variable.
     */
    private List<CrawlerData> washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        CrawlerBusinessTags bizTags = crawlerRequestRecord.tagsCreator().bizTags();
        CategoryTag categoryTag = bizTags.getCategoryTag();
        Html html = httpPage.getHtml();

        // "video-12345.html" -> "12345"
        String articleKey = lastRequest.getUrl().substring(lastRequest.getUrl().lastIndexOf("-") + 1).split("\\.")[0];
        String title = html.xpath("//h1[@class=\"topic-title\"]/text()").get();

        List<String> allContents = html.xpath("//div[@class=\"video-info\"]//dl[1]/dd[@class=\"txt\"]//text()").all();
        StringBuilder sbContent = new StringBuilder();
        for (String s : allContents) {
            sbContent.append(s).append(" ");
        }

        // On pcauto video, only original-content videos carry an author.
        String author = "";
        String authorId = "";
        String follows = "";
        List<Selectable> authorNodes = html.xpath("//div[@class=\"dingyue\"]").nodes();
        if (authorNodes.size() > 0) {
            author = html.xpath("//p[@class=\"name\"]/a/span[1]/text() | //div[@class=\"dingyue\"]/div/p/a/em/text()").get();
            follows = html.xpath("//div[@class=\"dingyue\"]/div/div/em/text()").get();
        }

        if (StringUtils.isBlank(follows)) {
            follows = "";
        }
        String audioUrl = html.xpath("//video/source[1]/@src").get();

        // Was "/div[...]" (absolute path from the document root), which can never
        // match inside an HTML document; "//div" matches like the xpaths above.
        List<String> topicType = html.xpath("//div[@class=\"video-info\"]//dl[2]/dd/a/text()").all();
        if (null != topicType && topicType.size() > 0) {
            categoryTag.addKVTag(Tag_Field_Topic_Type, topicType);
        }

        // Attach the car-series tag parsed from the inline serialData variable.
        List<Map<String, String>> seriesList = new ArrayList<>();
        String carInfo = "";
        Matcher mtSeries = SERIAL_DATA_PATTERN.matcher(httpPage.getRawText());
        while (mtSeries.find()) {
            // Keep the last occurrence, mirroring the original loop semantics.
            carInfo = mtSeries.group(1).trim();
        }
        if (StringUtils.isNotBlank(carInfo)) {
            JSONObject carObj = JSONObject.parseObject(carInfo);
            String seriesName = carObj.getString("sgName");
            String seriesId = carObj.getString("sgId");
            String seriesUrl = carObj.getString("sgUrl");
            Map<String, String> series = new HashMap<>();
            series.put("series_id", seriesId);
            series.put("series_name", seriesName);
            series.put("series_url", seriesUrl);
            seriesList.add(series);
        }
        try {
            String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
            CrawlerData crawlerArticleData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .url(lastRequest.getUrl())
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author_Follows, follows)
                    .addContentKV(Field_Content, sbContent.toString())
                    .addContentKV(Field_Audios, audioUrl)
                    .resultLabelTag(article)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .build();
            crawlerArticleData.tagsCreator().bizTags().addSiteBiz("video");
            if (seriesList.size() > 0) {
                crawlerArticleData.tagsCreator().bizTags().addCustomKV(Tag_Field_Series, seriesList);
            }
            crawlerArticleDataList.add(crawlerArticleData);
        } catch (Exception e) {
            // Arguments were swapped before, which dropped the stack trace.
            logger.error("wash article error", e);
        }
        return crawlerArticleDataList;
    }

    /**
     * Washes interaction metrics. A video page yields one record with
     * comments/views/likes for the article; a comment-list JSON page yields one
     * likes record per comment.
     */
    private List<CrawlerData> washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerInteractionDataList = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        Map<String, Object> extras = lastRequest.getExtras();
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String articleKey = lastRequest.getUrl().substring(lastRequest.getUrl().lastIndexOf("-") + 1).split("\\.")[0];

        if (lastRequestUrl.matches(videoRegex)) {
            // "comments" was stashed by afterInternalDownload.
            String comments = (String) extras.get("comments");
            String likes = httpPage.getHtml().xpath("//li[@class=\"video-vote video-vote-up\"]/span[@class=\"vote-txt txt\"]/text()").get();
            String views = httpPage.getHtml().xpath("//li[@class=\"last\"]/span[@class=\"txt\"]/text()").get();
            views = normalizeCount(views);
            try {
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), articleKey))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                        .url(lastRequestUrl)
                        .releaseTime(crawlerRequestRecord.getReleaseTime())
                        .addContentKV(Field_I_Comments, comments)
                        .addContentKV(Field_I_Views, views)
                        .addContentKV(Field_I_Likes, likes)
                        .resultLabelTag(interaction)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .build();
                crawlerData.tagsCreator().bizTags().addSiteBiz("video");
                crawlerInteractionDataList.add(crawlerData);
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
        if (lastRequestUrl.matches(commentRegex)) {
            JSONObject jsonObject = JSONObject.parseObject(httpPage.getRawText());
            JSONArray commentList = jsonObject.getJSONArray("data");
            for (Object o : commentList) {
                JSONObject commentJson = (JSONObject) o;
                String commentId = commentJson.getString("id");
                String likes = commentJson.getString("support");
                String pubTime = commentJson.getString("createTime");

                try {
                    CrawlerData crawlerData = CrawlerData.builder()
                            .data(crawlerRequestRecord, httpPage)
                            .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), commentId))
                            .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                            .url(lastRequest.getUrl())
                            .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd HH:mm:ss").getTime())
                            .addContentKV(Field_I_Likes, likes)
                            .resultLabelTag(interaction)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .build();
                    crawlerData.tagsCreator().bizTags().addSiteBiz("video");
                    crawlerData.setFilter(CrawlerEnum.CrawlerRecordFilter.keyOrDateRange);
                    crawlerInteractionDataList.add(crawlerData);
                } catch (ParseException e) {
                    // Arguments were swapped before, which dropped the stack trace.
                    logger.error("parse date error", e);
                }
            }
        }
        return crawlerInteractionDataList;
    }

    /**
     * Converts a Chinese abbreviated count such as "1.5万" into a plain number
     * string ("15000"). Values without "万" (including null) pass through
     * unchanged.
     *
     * <p>The previous string-splicing version mis-scaled values with more than
     * one decimal digit (e.g. "1.55万" became "155000" instead of "15500").
     */
    private String normalizeCount(String raw) {
        if (raw == null || !raw.contains("万")) {
            return raw;
        }
        try {
            double value = Double.parseDouble(raw.replace("万", ""));
            return String.valueOf((long) (value * 10000));
        } catch (NumberFormatException e) {
            logger.error("unexpected count format: " + raw, e);
            return raw;
        }
    }

    /**
     * Washes a comment-list JSON page into one comment record per entry,
     * parented on the article derived from the "articleUrl" request extra.
     */
    private List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerCommentDataList = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = lastRequest.getExtras();
        JSONObject jsonObject = JSONObject.parseObject(httpPage.getRawText());
        JSONArray commentList = jsonObject.getJSONArray("data");
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String articleUrl = (String) extras.get("articleUrl");
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("-") + 1).split("\\.")[0];
        for (Object o : commentList) {
            JSONObject commentJson = (JSONObject) o;
            String commentId = commentJson.getString("id");
            String pubTime = commentJson.getString("createTime");
            String author = commentJson.getString("nickName");
            String authorId = commentJson.getString("userId");
            String content = commentJson.getString("content");
            String floor = commentJson.getString("floor");
            try {
                CrawlerData crawlerCommentData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                        .url(articleUrl)
                        .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd HH:mm:ss").getTime())
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Author_Id, authorId)
                        .addContentKV(Field_Content, content)
                        .addContentKV(Field_Floor, floor)
                        .resultLabelTag(comment)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .build();
                crawlerCommentData.tagsCreator().bizTags().addSiteBiz("video");
                crawlerCommentData.setFilter(CrawlerEnum.CrawlerRecordFilter.keyOrDateRange);
                crawlerCommentDataList.add(crawlerCommentData);
            } catch (ParseException e) {
                // Arguments were swapped before, which dropped the stack trace.
                logger.error("parse date error", e);
            }
        }
        return crawlerCommentDataList;
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // Intentionally empty: no post-execution cleanup is required for this script.
    }

    /**
     * Shallow-copies the request extras map.
     *
     * @param inExtras source map; must not be {@code null}
     * @return a new, independent {@link HashMap} with the same entries
     */
    public static Map<String, Object> copyExtras(Map<String, Object> inExtras) {
        return new HashMap<>(inExtras);
    }

}
