package com.chance.cc.crawler.development.scripts.wangyi.health;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Topic_Type;

/**
 * @author lt
 * @version 1.0
 * @date 2021-02-08 11:00:16
 * @email okprog@sina.com
 */
public class WYHealthCrawlerScript extends CrawlerCommonScript {
    // NOTE(review): conventionally this would be private static final; left as-is.
    private Logger logger = LoggerFactory.getLogger(WYHealthCrawlerScript.class);

    // --- URL match patterns used by the parseLinks() dispatch ---
    // Health channel index page.
    public static final String indexRegex = "https?://jiankang\\.163\\.com/";
    // Sports "guandian" special index page (first list page).
    public static final String sportsIndexRegex = "http://sports\\.163\\.com/special/0005rt/sportsgd\\.html";
    // Numbered sports list pages, e.g. .../sportsgd_02.html.
    public static final String sportsListRegex = "http://sports\\.163\\.com/special/0005rt/sportsgd_\\d*\\.html";
//    public static final String tagListRegex = "https?://jiankang\\.163\\.com/special/\\S*/\\?\\d*";
    // Health channel list pages, with or without the trailing /?N query form.
    public static final String listUrlRegex = "https?://jiankang\\.163\\.com/special/\\S*|https?://jiankang\\.163\\.com/special/\\S*/\\?\\d*";
    // Ad-hosted list page (ad.163.com).
    public static final String adListUrlRegex = "https://ad\\.163\\.com/";
    // Health article detail pages: /yyyy/MM/dd/<key>.html.
    public static final String articleUrlRegex = "https?://jiankang\\.163\\.com/\\d*/\\d*/\\d*/\\S*\\.html";
    // Comment API pages (newList endpoint with paging query string).
    public static final String commentsRegex = "https://comment\\.api\\.163\\.com/api/v1/products/a2869674571f77b5a0867c3d71db5856/threads/\\S*/comments/newList\\S*";
    // Article detail pages hosted under www.163.com sports/news sections.
    public static final String sportsArticleUrlRegex = "https://www\\.163\\.com/sports/article/\\S*.html";
    public static final String newsArticleUrlRegex = "https://www\\.163\\.com/news/article/\\S*.html";

    // --- URL build templates (String.format) ---
    public static final String sportsArticleUrlFormat = "https://www.163.com/sports/article/%s.html";
    public static final String listUrlFormat = "https://jiankang.163.com/special/%s";
    // %s is a zero-padded two-digit page number.
    public static final String sportsListUrlFormat = "http://sports.163.com/special/0005rt/sportsgd_%s.html";
    // Interaction (vote/comment count) endpoint keyed by article key.
    public static final String interactionFormat = "https://comment.api.163.com/api/v1/products/a2869674571f77b5a0867c3d71db5856/threads/%s";
    // Comment list endpoint: %s article key, %s numeric offset (pages of 30).
    public static final String commentsFormat = "https://comment.api.163.com/api/v1/products/a2869674571f77b5a0867c3d71db5856/threads/%s/comments/newList?ibc=newspc&limit=30&showLevelThreshold=72&headLimit=1&tailLimit=2&offset=%s";

    /**
     * Dispatches a downloaded page to the link parser that matches its URL.
     * Re-queues the request when the download failed or returned an unexpected
     * status code; drops 404/401/422 pages without washing or re-parsing.
     *
     * @param crawlerRequestRecord the record of the request that produced this page
     * @param httpPage             the downloaded page
     * @return follow-up requests parsed from the page; empty when nothing matched
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        int statusCode = httpPage.getStatusCode();
        String lastRequestUrl = lastRequest.getUrl();
        // Retry path: blank body, failed download, or a status outside the handled set.
        if (StringUtils.isBlank(httpPage.getRawText()) || !httpPage.isDownloadSuccess() || (statusCode != 200 && statusCode != 404 && statusCode != 401 && statusCode != 422)){
            parsedLinks.add(crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            // Drop the Host header and the filter tag so the reload is not deduplicated away.
            crawlerRequestRecord.getHttpRequest().getHeaders().remove("Host");
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            logger.error("{} --> this link has been reload status code is :{}",lastRequestUrl, statusCode);
            return parsedLinks;
        }
        // Permanent client-side failures: give up on this URL entirely.
        if (statusCode == 404 || statusCode == 401 || statusCode == 422){
            logger.error("{} --> this link is {}", lastRequestUrl,statusCode);
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.setNeedParsedPage(false);
            return parsedLinks;
        }
        if (lastRequestUrl.matches(indexRegex)){
            return parseIndexLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(adListUrlRegex)){
            return parseAdListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(sportsIndexRegex)){
            return parseSportsIndexLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(listUrlRegex)){
            return parseListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(sportsListRegex)){
            return parseSportsListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(articleUrlRegex) || lastRequestUrl.matches(sportsArticleUrlRegex) || lastRequestUrl.matches(newsArticleUrlRegex)){
            return parseArticleLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(commentsRegex)){
            return parseCommentLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        // FIX: previously returned null here; an empty list is the safe "nothing to
        // follow" result and spares callers a null check.
        // NOTE(review): if the scheduler distinguishes null from empty, restore null.
        logger.error("{} --> no link parser matched this url", lastRequestUrl);
        return parsedLinks;
    }

    /**
     * Parses article links from the ad-hosted list page (ad.163.com).
     * Each list item yields an item-page request tagged for article + interaction
     * washing, carrying the list URL in its extras.
     *
     * @return parsedLinks with one record per parseable list item appended
     */
    private List<CrawlerRequestRecord> parseAdListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Html html = httpPage.getHtml();

        List<Selectable> itemNodes = html.xpath("//div[@class=\"content\"]/ul/li").nodes();
        for (Selectable itemNode : itemNodes) {
            String articleUrl = itemNode.xpath("./a/@href").get();
            String pubTime = itemNode.xpath("./span/text()").get();
            // FIX: guard against missing href/time nodes to avoid a NullPointerException
            // on the replace()/matches() calls below.
            if (StringUtils.isBlank(articleUrl) || StringUtils.isBlank(pubTime)) {
                logger.error("{} --> ad list item missing url or publish time", httpRequestUrl);
                continue;
            }
            // Publish time is rendered wrapped in parentheses; strip them first.
            pubTime = pubTime.replace("(", "").replace(")", "");
            long releaseTime = 0;
            if (pubTime.matches("\\d{2}-\\d{2}\\s*\\d{2}:\\d{2}")){
                // No year in the string: assume the current year.
                Calendar calendar = Calendar.getInstance();
                int year = calendar.get(Calendar.YEAR);
                pubTime = year + "-" + pubTime;
            }
            try {
                releaseTime = DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm").getTime();
            }catch (Exception e){
                logger.error("parse list item date error");
                continue;
            }
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(articleUrl)
                    .recordKey(articleUrl)
                    .releaseTime(releaseTime)
                    .copyBizTags()
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                    .build();
            // Thread the originating list URL through to washPage via extras.
            Map<String,Object> extras = new HashMap<>();
            extras.put("listUrl",httpRequestUrl);
            itemRecord.getHttpRequest().setExtras(extras);
            parsedLinks.add(itemRecord);
        }

        return parsedLinks;
    }

    /**
     * Parses a numbered sports list page (.../sportsgd_NN.html): queues the next
     * page while the page number is below 20, then one item-page request per
     * article in the list, tagged for article + interaction washing.
     *
     * @return parsedLinks with the turn-page record and item records appended
     */
    private List<CrawlerRequestRecord> parseSportsListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Html html = httpPage.getHtml();
        // Page number sits between the last '_' and the '.html' suffix.
        String pn = httpRequestUrl.substring(httpRequestUrl.lastIndexOf("_") + 1).split("\\.")[0];
        int pageNo = Integer.parseInt(pn);
        if (pageNo < 20){
            // Queue the next list page (zero-padded two-digit page numbers).
            String listUrl = String.format(sportsListUrlFormat,String.format("%02d",(pageNo + 1)));
            CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(listUrl)
                    .recordKey(listUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(listRecord);

            List<Selectable> itemNodes = html.xpath("//ul[@class=\"articleList\"]/li").nodes();
            for (Selectable itemNode : itemNodes) {
                String articleUrl = itemNode.xpath("./a/@href").get();
                String pubTime = itemNode.xpath("./span[@class=\"postTime\"]/text()").get();
                // FIX: guard against missing nodes to avoid a NullPointerException
                // on the matches() call below.
                if (StringUtils.isBlank(articleUrl) || StringUtils.isBlank(pubTime)) {
                    logger.error("{} --> sports list item missing url or publish time", httpRequestUrl);
                    continue;
                }
                long releaseTime = 0;
                if (pubTime.matches("\\d{2}-\\d{2}\\s*\\d{2}:\\d{2}")){
                    // No year in the string: assume the current year.
                    Calendar calendar = Calendar.getInstance();
                    int year = calendar.get(Calendar.YEAR);
                    pubTime = year + "-" + pubTime;
                }
                try {
                    releaseTime = DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm").getTime();
                }catch (Exception e){
                    logger.error("parse list item date error");
                    continue;
                }
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(articleUrl)
                        .recordKey(articleUrl)
                        .releaseTime(releaseTime)
                        .copyBizTags()
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                        .build();
                // Thread the originating list URL through to washPage via extras.
                Map<String,Object> extras = new HashMap<>();
                extras.put("listUrl",httpRequestUrl);
                itemRecord.getHttpRequest().setExtras(extras);
                parsedLinks.add(itemRecord);
            }
        }
        return parsedLinks;
    }

    /**
     * Parses the sports special index page: queues list page 02 as the first
     * turn-page request, then one item-page request per article on the index,
     * tagged for article + interaction washing.
     *
     * @return parsedLinks with the turn-page record and item records appended
     */
    private List<CrawlerRequestRecord> parseSportsIndexLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Html html = httpPage.getHtml();
        // The index itself is page 1; pagination starts at "..._02.html".
        String listUrl = String.format(sportsListUrlFormat,String.format("%02d",2));
        CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(listUrl)
                .recordKey(listUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .build();
        parsedLinks.add(listRecord);

        List<Selectable> itemNodes = html.xpath("//ul[@class=\"articleList\"]/li").nodes();
        for (Selectable itemNode : itemNodes) {
            String articleUrl = itemNode.xpath("./a/@href").get();
            String pubTime = itemNode.xpath("./span[@class=\"postTime\"]/text()").get();
            // FIX: guard against missing nodes to avoid a NullPointerException
            // on the matches() call below.
            if (StringUtils.isBlank(articleUrl) || StringUtils.isBlank(pubTime)) {
                logger.error("{} --> sports index item missing url or publish time", httpRequestUrl);
                continue;
            }
            long releaseTime = 0;
            if (pubTime.matches("\\d{2}-\\d{2}\\s*\\d{2}:\\d{2}")){
                // No year in the string: assume the current year.
                Calendar calendar = Calendar.getInstance();
                int year = calendar.get(Calendar.YEAR);
                pubTime = year + "-" + pubTime;
            }
            try {
                releaseTime = DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm").getTime();
            }catch (Exception e){
                logger.error("parse list item date error");
                continue;
            }
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(articleUrl)
                    .recordKey(articleUrl)
                    .releaseTime(releaseTime)
                    .copyBizTags()
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                    .build();
            // Thread the originating index URL through to washPage via extras.
            Map<String,Object> extras = new HashMap<>();
            extras.put("listUrl",httpRequestUrl);
            itemRecord.getHttpRequest().setExtras(extras);
            parsedLinks.add(itemRecord);
        }

        return parsedLinks;
    }

    /**
     * Handles a comment-API response: while a full page of 30 comment ids keeps
     * coming back, queues the next offset; any non-empty page is flagged for
     * washing as comment + interaction data.
     *
     * @return parsedLinks with the next-page record appended when applicable
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest request = crawlerRequestRecord.getHttpRequest();
        String requestUrl = request.getUrl();
        Map<String, Object> extras = request.getExtras();
        Map<String, Object> queryParams = getUrlParams(requestUrl);
        JSONObject body = JSONObject.parseObject(httpPage.getRawText());
        JSONArray commentIds = body.getJSONArray("commentIds");
        if (commentIds == null) {
            return parsedLinks;
        }
        // A full page of 30 suggests more comments remain: request the next offset.
        if (commentIds.size() == 30) {
            int nextOffset = Integer.parseInt((String) queryParams.get("offset")) + 30;
            String nextPageUrl = String.format(commentsFormat, extras.get("articleKey"), nextOffset);
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            nextPageRecord.getHttpRequest().setExtras(copyExtras(extras));
            parsedLinks.add(nextPageRecord);
        }
        // Non-empty page: mark this record's payload for comment/interaction washing.
        if (commentIds.size() > 0) {
            crawlerRequestRecord.setNeedWashPage(true);
            crawlerRequestRecord.tagsCreator().resultTags().addResultDataType(comment);
            crawlerRequestRecord.tagsCreator().resultTags().addResultDataType(interaction);
        }
        return parsedLinks;
    }

    /**
     * Parses an article detail page: queues an internal download of the
     * interaction (vote/comment count) endpoint, and — for the health site, or
     * when the schedule requests comment data — a comment-list request carrying
     * the article key in its extras.
     *
     * NOTE(review): when both the site is "health" AND the schedule carries the
     * comment data type, two records for the same comment URL are queued —
     * confirm downstream deduplication handles this.
     *
     * @return parsedLinks with the interaction and comment records appended
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String lastUrl = httpRequest.getUrl();
        // Article key is the filename without the ".html" suffix.
        String articleKey = lastUrl.substring(lastUrl.lastIndexOf("/") + 1).split("\\.")[0];
        Map<String,Object> extras = httpRequest.getExtras();
        if (null == extras){
            extras = new HashMap<>();
        }
        extras.put("articleKey",articleKey);

        // Internal download of the interaction counts for this article.
        String interactionUrl = String.format(interactionFormat,articleKey);
        CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(interactionUrl)
                .recordKey(interactionUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        parsedLinks.add(interactionRecord);

        if (crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site").equalsIgnoreCase("health")){
            // Health site: always fetch the first comment page (offset 0).
            String commentUrl = String.format(commentsFormat,articleKey,0);
            CrawlerRequestRecord cmtRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .recordKey(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .build();
            cmtRecord.getHttpRequest().setExtras(copyExtras(extras));
            parsedLinks.add(cmtRecord);
        }
        // Schedule-driven comment collection: requires filter info on the biz tags.
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
        if (categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null) {
            if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
                logger.error("wangyi news crawler comment need to filter information!");
                return parsedLinks;
            }
            KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
            CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(),CrawlerRecord.class);
            // First comment page (offset 0), with the schedule's filter settings copied on.
            String commentUrl = String.format(commentsFormat,articleKey,0);
            CrawlerRequestRecord cmtRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .recordKey(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .build();
            cmtRecord.getHttpRequest().setExtras(copyExtras(extras));
            cmtRecord.setFilter(filterInfoRecord.getFilter());
            cmtRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
            parsedLinks.add(cmtRecord);
        }
        return parsedLinks;
    }

    /**
     * Parses a health channel list page and queues the next page plus one
     * item-page request per article.
     *
     * Pagination works by threading a channel "api" id through request extras:
     * the first page URL (containing '?') exposes the id via a data-channel
     * attribute; subsequent page URLs encode the page number after the last '_'.
     * The next page is only queued while the current page still has items.
     *
     * @return parsedLinks with the turn-page record and item records appended
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Html html = httpPage.getHtml();
        String lastUrl = httpRequest.getUrl();
        String nextPageUrl = "";
        Map<String,Object> listExtras = httpRequest.getExtras();
        if (listExtras == null){
            listExtras = new HashMap<>();
        }
        if (lastUrl.contains("?")){
            // First list page: read the channel api id from the page and
            // remember it in extras so later pages can build their successor URLs.
            String api = html.xpath("//div[@id=\"newidx_news_container\"]/@data-channel").get();
            String apiAndPage = api + "_02";
            nextPageUrl = String.format(listUrlFormat,apiAndPage);
            listExtras.put("api",api);
        }else {
            // Follow-up page: increment the page number parsed from the URL tail.
            // NOTE(review): assumes "api" was put into extras by the first page —
            // a record arriving here without it would build a "null_NN" URL.
            String api = (String) listExtras.get("api");
            String[] pages = lastUrl.split("_");
            String page = pages[pages.length - 1];
            int pn = Integer.parseInt(page);
            pn += 1;
            // Zero-pad single-digit page numbers to match the site's URL scheme.
            String newPage = pn < 10 ? "0" + pn : String.valueOf(pn);
            String apiAndPage = api + "_" + newPage;
            nextPageUrl = String.format(listUrlFormat,apiAndPage);
        }
        List<Selectable> itemNodes = html.xpath("//div[@class=\"news_list_container clearfix\"]/div").nodes();
        if (itemNodes != null && itemNodes.size() > 0){
            // Only queue the next page while the current page still has items.
            CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            listRecord.getHttpRequest().setExtras(copyExtras(listExtras));
            parsedLinks.add(listRecord);

            // Parse the list items into article item-page requests.
            for (Selectable itemNode : itemNodes) {
                String itemUrl = itemNode.xpath("./div/h2/a/@href").get();
                String pubTime = itemNode.xpath("./div/p[@class=\"pubtime\"]/text()").get();
                // Strip any fragment so the record key is canonical.
                if (itemUrl.contains("#")){
                    itemUrl = itemUrl.split("#")[0];
                }
                if (StringUtils.isBlank(pubTime)){
                    logger.error("{} ---> has one item parsed no time",itemUrl);
                    continue;
                }
                // Keyword links become the item's topic-type tags.
                List<String> tags = itemNode.xpath("./div/p[@class=\"keywords\"]/a/text()").all();
                try {
                    pubTime = pubTime.trim();
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(itemUrl)
                            .recordKey(itemUrl)
                            .releaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime())
                            .copyBizTags()
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                            .build();
                    // Thread the originating list URL through to washPage via extras.
                    Map<String,Object> extras = new HashMap<>();
                    extras.put("listUrl",lastUrl);
                    itemRecord.getHttpRequest().setExtras(extras);
                    itemRecord.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type,tags);
                    parsedLinks.add(itemRecord);
                }catch (Exception e){
                    logger.error("parse date error");
                }
            }
        }

        return parsedLinks;
    }

    /**
     * Parses the health channel index: emits one turn-page request per
     * navigation link, carrying the link's text as the record's category path.
     *
     * @return parsedLinks with one record per navigation entry appended
     */
    private List<CrawlerRequestRecord> parseIndexLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        Html html = httpPage.getHtml();
        for (Selectable navNode : html.xpath("//div[@id=\"index_nav_content\"]/a").nodes()) {
            String channelUrl = navNode.xpath("./@href").get();
            String channelName = navNode.xpath("./text()").get();
            CrawlerRequestRecord channelRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(channelUrl)
                    .recordKey(channelUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .notFilterRecord()
                    .build();
            // The category path is just the single navigation tag name.
            List<String> categoryPath = new ArrayList<>();
            categoryPath.add(channelName);
            channelRecord.tagsCreator().bizTags().addCustomKV(Field_Path,categoryPath);
            parsedLinks.add(channelRecord);
        }
        return parsedLinks;
    }


    /**
     * After the internal download of the interaction endpoint completes, copies
     * the comment count ("tcount") and like count ("vote") from its JSON body
     * into the article request's extras, so washPage can emit interaction data.
     * Falls back to "0" counts when the body cannot be parsed.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        HttpRequest httpRequest = crawlerRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        // FIX: extras may be absent on the record; create it so the puts below cannot NPE.
        if (extras == null) {
            extras = new HashMap<>();
            httpRequest.setExtras(extras);
        }
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            HttpRequest internalDownloadRecordHttpRequest = internalDownloadRecord.getHttpRequest();
            String downloadRecordHttpRequestUrl = internalDownloadRecordHttpRequest.getUrl();
            if (downloadRecordHttpRequestUrl.contains("comment")){
                try {
                    JSONObject pageObject = JSONObject.parseObject(internalDownloadRecord.getInternalDownloadPage().getRawText());
                    // "tcount" replaced the previously-used "rcount" field.
                    String comments = pageObject.getString("tcount");
                    String likes = pageObject.getString("vote");
                    extras.put("comments",comments);
                    extras.put("likes",likes);
                } catch (Exception e) {
                    // Best-effort: default to zero counts, but don't swallow silently.
                    logger.error("parse interaction counts failed for {}", downloadRecordHttpRequestUrl, e);
                    extras.put("comments","0");
                    extras.put("likes","0");
                }
            }
        }
    }

    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String articleKey= (String) extras.get("articleKey");
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (StringUtils.isBlank(httpPage.getRawText())){
            return null;
        }
        //清洗文章
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.article)){
            Html html = httpPage.getHtml();
            String title = html.xpath("//h1[@class=\"post_title\"]/text()").get();
            String sourceInfo = html.xpath("//div[@class=\"post_info\"]/text()").get();
            String source = "";
            if (StringUtils.isNotBlank(sourceInfo) && sourceInfo.contains("来源:")){
                try {
                    source = sourceInfo.trim().split("来源:")[1];
                } catch (Exception e) {
                    source = html.xpath("//div[@class=\"post_info\"]/a/text()").get();
                }
            }
            List<String> allContents = html.xpath("//div[@class=\"post_body\"]/p//text()").all();
            StringBuffer sbContent = new StringBuffer();
            for (String allContent : allContents) {
                sbContent.append(allContent);
            }
            List<String> images = html.xpath("//div[@class=\"post_body\"]//img/@src").all();
            StringBuffer sbImage = new StringBuffer();
            for (String image : images) {
                sbImage.append(unescapeHtml2J(image)).append("\\x01");
            }
            String listUrl = extras == null ? null : (String) extras.get("listUrl");
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(httpRequest.getUrl())
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_Title,title)
                    .addContentKV(Field_Content,sbContent.toString())
                    .addContentKV(Field_Images,sbImage.toString())
                    .addContentKV(Field_Source,source)
                    .addContentKV(Field_Urls, listUrl)
                    .resultLabelTag(article)
                    .build();
            crawlerDataList.add(crawlerData);
            if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.interaction)){
                String comments = (String) extras.get("comments");
                String likes = (String) extras.get("likes");
                CrawlerData crawlerInteractionData = CrawlerData.builder()
                        .data(crawlerRequestRecord,httpPage)
                        .url(httpRequest.getUrl())
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), articleKey))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                        .releaseTime(System.currentTimeMillis())
                        .addContentKV(Field_I_Likes,likes)
                        .addContentKV(Field_I_Comments,comments)
                        .resultLabelTag(interaction)
                        .build();
                crawlerDataList.add(crawlerInteractionData);
            }
        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.comment)){
            JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
            JSONArray commentIds = pageObj.getJSONArray("commentIds");
            JSONObject comments = pageObj.getJSONObject("comments");
            for (Object commentId : commentIds) {
                String commentIdStr = (String) commentId;
                if (commentIdStr.contains(",")){
                    for (String id : commentIdStr.split(",")) {
                        JSONObject cmtObj = comments.getJSONObject(id);
                        String content = cmtObj.getString("content");
                        String contentId = cmtObj.getString("commentId");
                        String pubTime = cmtObj.getString("createTime");
                        String author = cmtObj.getJSONObject("user").getString("nickname");
                        String authorId = cmtObj.getJSONObject("user").getString("userId");
                        CrawlerData crawlerData = null;
                        try {
                            crawlerData = CrawlerData.builder()
                                    .data(crawlerRequestRecord,httpPage)
                                    .url(httpRequest.getUrl())
                                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), contentId))
                                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                                    .releaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime())
                                    .addContentKV(Field_Content,content)
                                    .addContentKV(Field_Author,author)
                                    .addContentKV(Field_Author_Id,authorId)
                                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                                    .resultLabelTag(comment)
                                    .build();
                            crawlerDataList.add(crawlerData);
                        } catch (ParseException e) {
                            logger.error("parse date error");
                        }
                        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
                            String likes = cmtObj.getString("vote");
                            String collection = cmtObj.getString("favCount");
                            CrawlerData crawlerInteractionData = CrawlerData.builder()
                                    .data(crawlerRequestRecord,httpPage)
                                    .url(httpRequest.getUrl())
                                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), contentId))
                                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), contentId))
                                    .releaseTime(System.currentTimeMillis())
                                    .addContentKV(Field_I_Likes,likes)
                                    .addContentKV(Field_I_Collection,collection)
                                    .resultLabelTag(interaction)
                                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                                    .build();
                            crawlerDataList.add(crawlerInteractionData);
                        }
                    }
                }else {
                    JSONObject cmtObj = comments.getJSONObject(commentIdStr);
                    String content = cmtObj.getString("content");
                    String contentId = cmtObj.getString("commentId");
                    String pubTime = cmtObj.getString("createTime");
                    String author = cmtObj.getJSONObject("user").getString("nickname");
                    String authorId = cmtObj.getJSONObject("user").getString("userId");
                    CrawlerData crawlerData = null;
                    try {
                        crawlerData = CrawlerData.builder()
                                .data(crawlerRequestRecord,httpPage)
                                .url(httpRequest.getUrl())
                                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), contentId))
                                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                                .releaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime())
                                .addContentKV(Field_Content,content)
                                .addContentKV(Field_Author,author)
                                .addContentKV(Field_Author_Id,authorId)
                                .resultLabelTag(comment)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                                .build();
                        crawlerDataList.add(crawlerData);
                    } catch (ParseException e) {
                        logger.error("parse date error");
                    }
                    if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
                        String likes = cmtObj.getString("vote");
                        String collection = cmtObj.getString("favCount");
                        CrawlerData crawlerInteractionData = CrawlerData.builder()
                                .data(crawlerRequestRecord,httpPage)
                                .url(httpRequest.getUrl())
                                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), contentId))
                                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), contentId))
                                .releaseTime(System.currentTimeMillis())
                                .addContentKV(Field_I_Likes,likes)
                                .addContentKV(Field_I_Collection,collection)
                                .resultLabelTag(interaction)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                                .build();
                        crawlerDataList.add(crawlerInteractionData);
                    }
                }
            }
        }
        return crawlerDataList;
    }


    @Override
    public void initUrlRegulars() {
        // Register every URL pattern this script is allowed to crawl.
        // Order matches the original registration sequence.
        String[] regulars = {
                indexRegex,
                listUrlRegex,
                articleUrlRegex,
                commentsRegex,
                sportsIndexRegex,
                sportsListRegex,
                sportsArticleUrlRegex,
                newsArticleUrlRegex,
                adListUrlRegex,
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Accepts only records tagged with the "health" business site.
     *
     * @param crawlerRecord the incoming request record
     * @return {@code true} if the record's site tag equals "health"
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        String site = crawlerRecord.tagsCreator().bizTags().site();
        // "health".equals(site) is null-safe, which also covers the original
        // isNotBlank guard: a null/blank site can never equal "health".
        return "health".equals(site);
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // Intentionally empty: this script has no post-execution cleanup to do.
    }

    /**
     * Identifies the crawler domain (site family) this script belongs to.
     *
     * @return the fixed domain key "wangyi" (NetEase / 163.com)
     */
    @Override
    public String domain() {
        return "wangyi";
    }

    /**
     * Creates a shallow copy of the given extras map so the caller can add or
     * remove entries without mutating the original.
     *
     * @param inExtras source map; must not be {@code null}
     * @return a new mutable {@link HashMap} containing the same entries
     */
    public static Map<String, Object> copyExtras(Map<String,Object> inExtras){
        // HashMap's copy constructor performs the same entry-by-entry shallow
        // copy the original manual loop did, and pre-sizes the table.
        return new HashMap<>(inExtras);
    }
    /**
     * Parses the query string of a URL into a name/value map.
     * <p>
     * Keeps the original contract of returning {@code null} when the URL has
     * no (or an empty) query string, so existing null-checking callers keep
     * working. Values are returned raw, without URL decoding.
     *
     * @param url e.g. {@code http://*.*.com?aa=11&bb=22&cc=33}
     * @return map of parameter names to values, or {@code null} if the URL
     *         carries no query string
     */
    public static Map<String, Object> getUrlParams(String url) {
        if (!url.contains("?")) {
            return null;
        }
        // limit=2 keeps a trailing empty element, so "http://a.com?" no longer
        // throws ArrayIndexOutOfBoundsException (unbounded split drops it).
        String query = url.split("\\?", 2)[1];
        // A fragment is not part of the query string; drop it.
        int fragmentPos = query.indexOf('#');
        if (fragmentPos >= 0) {
            query = query.substring(0, fragmentPos);
        }
        if (query.trim().isEmpty()) {
            return null;
        }
        Map<String, Object> map = new HashMap<>();
        for (String pair : query.split("&")) {
            // Split on the first '=' only so values containing '=' survive
            // intact instead of being silently discarded.
            String[] kv = pair.split("=", 2);
            if (kv.length == 2) {
                map.put(kv[0], kv[1]);
            }
        }
        return map;
    }

    /**
     * Repeatedly HTML-unescapes a string to undo multiply-escaped entities
     * such as {@code &amp;amp;amp;}, capped at 6 passes (same as the original
     * {@code times > 5} guard) as a safety net against pathological input.
     *
     * @param str the possibly multiply-escaped string; must not be {@code null}
     * @return the fully unescaped string
     */
    public static String unescapeHtml2J(String str){
        for (int pass = 0; pass < 6 && str.contains("&") && str.contains(";"); pass++) {
            String unescaped = StringEscapeUtils.unescapeHtml(str);
            // Stop as soon as a pass makes no change: text that merely
            // contains '&' and ';' (e.g. "fish & chips;") previously burned
            // all remaining passes for nothing. The result is unchanged.
            if (unescaped.equals(str)) {
                break;
            }
            str = unescaped;
        }
        return str;
    }
}
