package com.chance.cc.crawler.development.scripts.tenxun;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.meta.core.bean.crawler.CrawlerDomainUrls;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URLEncoder;
import java.text.ParseException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.*;

/**
 * @author lt
 * @version 1.0
 * @date 2021-03-24 13:25:08
 * @email okprog@sina.com
 */
public class TenXunNewsCrawlerScript extends CrawlerCommonScript {

    // FIX: loggers carry no per-instance state — SLF4J idiom is one static final
    // logger per class instead of one instance field per object.
    private static final Logger logger = LoggerFactory.getLogger(TenXunNewsCrawlerScript.class);

    // URL patterns used by parseLinks() to classify which crawl stage produced a page.
    public static final String indexUrlRegex = "https?://new\\.qq\\.com/";
    public static final String listPageUrlRegex = "https://i\\.news\\.qq\\.com/trpc\\.qqnews_web\\.kv_srv\\.kv_srv_http_proxy/list\\S*";
    public static final String sportsUrlRegex = "https://pacaio\\.match\\.qq\\.com/irs/rcd\\S*";
    public static final String articleUrlRegex = "https?://new\\.qq\\.com/\\S*/\\S{8}/\\S*\\.html";
    public static final String commentUrlRegex = "https://coral\\.qq\\.com/article/\\d*/comment/v2\\S*";
    public static final String commentsNumUrlRegex = "https://coral\\.qq\\.com/article/\\d*/commentnum\\?source=1";
    public static final String newUrlRegex = "https://new\\.qq\\.com/rain/a/\\S*";

    // Templates for building follow-up request URLs. listPageUrlFormat uses
    // #placeholders (String.replace); the rest are String.format patterns.
    public static final String listPageUrlFormat = "https://i.news.qq.com/trpc.qqnews_web.kv_srv.kv_srv_http_proxy/list?" +
            "sub_srv_id=#sub_srv_id&srv_id=pc&offset=#offset&limit=20&strategy=1&ext=%7b%22pool%22%3a%5b%22high%22%2c%22top%22%5d%2c%22is_filter%22%3a10%2c%22check_type%22%3atrue%7d";
    public static final String sportsUrlFormat = "https://pacaio.match.qq.com/irs/rcd?cid=52&token=%s&ext=%s&page=%s&isForce=1";
    public static final String commentUrlFormat = "https://coral.qq.com/article/%s/comment/v2?oriorder=t&pageflag=1&scorecursor=0&orirepnum=2&reporder=o&reppageflag=1&source=1&orinum=30&cursor=%s";
    public static final String commentsNumUrlFormat = "https://coral.qq.com/article/%s/commentnum?source=1";
    public static final String newUrlFormat = "https://new.qq.com/rain/a/%s";

    // Site identifier this script answers for in crawlerCheck().
    private static final String scriptSite = "news";
    // Biz-tag keys holding the externally supplied sports-channel credentials.
    public static final String SPORTS_TOKEN = "sports_token";
    public static final String SPORTS_EXT = "sports_ext";

    /**
     * Rewrites a request that was seeded from a domain-result JSON payload:
     * the payload supplies the real article URL, its release time and an
     * optional site_info tag. The consumed JSON tag is removed afterwards so
     * it is not re-processed downstream.
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        if (requestRecord.tagsCreator().bizTags().hasKVTag(Tag_Field_Domain_Result_Json)) {
            KVTag resultTag = requestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag(Tag_Field_Domain_Result_Json);
            CrawlerDomainUrls domainUrls = JSON.parseObject(String.valueOf(resultTag.getVal()), CrawlerDomainUrls.class);
            JSONObject payload = JSONObject.parseObject(domainUrls.getUrl());
            // Carry the originating channel along when the payload provides one.
            String siteInfo = payload.getString("site_info");
            if (StringUtils.isNotBlank(siteInfo)) {
                requestRecord.tagsCreator().bizTags().addCustomKV(Tag_Site_Info, siteInfo);
            }
            requestRecord.setReleaseTime(payload.getLong("releaseTime"));
            requestRecord.setNeedParsedPage(true);
            requestRecord.tagsCreator().resultTags().addResultDataType(article);
            requestRecord.tagsCreator().resultTags().addResultDataType(interaction);
            requestRecord.getHttpRequest().setUrl(payload.getString(Field_Urls));
            requestRecord.tagsCreator().bizTags().getCategoryTag().getKvTags().remove(Tag_Field_Domain_Result_Json); // drop the consumed tag
        }
        return super.prepareRequest(requestRecord, supportSourceRecords);
    }

    /**
     * Dispatches a downloaded page to the matching link-extraction routine
     * based on the URL that produced it (index page, channel list, sports
     * list, article page or comment page).
     *
     * @param crawlerRequestRecord the record whose request produced httpPage
     * @param httpPage downloaded page to extract follow-up links from
     * @return follow-up requests discovered on the page; empty when the page
     *         yields none or the URL matches no known pattern
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        if (404 == httpPage.getStatusCode()){
            logger.info("status code is 404");
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        if (doHttpPageCheck(crawlerRequestRecord,httpPage)){
            // Broken download: re-queue the same record for a retry, unfiltered.
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            parsedLinks.add(crawlerRequestRecord);
            return parsedLinks;
        }
        String lastRequestUrl = lastRequest.getUrl();
        if (lastRequestUrl.matches(indexUrlRegex)){
            // Seed one channel-list request per configured keyword.
            Map<String, Object> extras = lastRequest.getExtras();
            List<String> keywords = castList(extras.get("keywords"),String.class);
            for (String keyword : keywords) {
                String listUrl = listPageUrlFormat.replace("#sub_srv_id",keyword).replace("#offset","0");
                CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(listUrl)
                        .recordKey(listUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .build();
                requestRecord.tagsCreator().bizTags().addCustomKV(Tag_Site_Info,keyword);
                parsedLinks.add(requestRecord);

                // The sports channel uses a separate backend that needs a token/ext pair.
                if ("sports".equals(keyword)){
                    String token = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(SPORTS_TOKEN);
                    String ext = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(SPORTS_EXT);
                    if (StringUtils.isBlank(token) || StringUtils.isBlank(ext)){
                        logger.error("sports bk crawler need 'token' and 'ext' info ! token : {}, ext : {}",token,ext);
                        continue;
                    }
                    // NOTE(review): deprecated single-arg encode() uses the platform
                    // default charset — kept as-is for behavior compatibility.
                    String sportsUrl = String.format(sportsUrlFormat, token, URLEncoder.encode(ext), 0);
                    CrawlerRequestRecord sportsRecord = CrawlerRequestRecord.builder()
                            .turnPageRequest(requestRecord)
                            .httpUrl(sportsUrl)
                            .recordKey(sportsUrl)
                            .releaseTime(System.currentTimeMillis())
                            .notFilterRecord()
                            .copyBizTags()
                            .build();
                    parsedLinks.add(sportsRecord);
                }
            }
            return parsedLinks;
        }
        if (lastRequestUrl.matches(listPageUrlRegex)){
            return parseListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(sportsUrlRegex)){
            return parseSportsListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(articleUrlRegex) || lastRequestUrl.matches(newUrlRegex)){
            return parseArticleLink(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(commentUrlRegex)){
            return parseCommentLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        // FIX: previously returned null for unrecognized URLs; every other path in
        // this method signals "no links" with an (empty) list, so do the same here
        // instead of forcing callers to null-check.
        return parsedLinks;
    }

    /**
     * Extracts article links from a sports-channel list response and, while the
     * feed keeps returning data and the page counter is below 200, schedules
     * the next list page.
     */
    private List<CrawlerRequestRecord> parseSportsListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> urlParams = getUrlParams(httpRequestUrl);
        JSONObject pageObject = JSONObject.parseObject(httpPage.getRawText());
        int dataNum = pageObject.getIntValue("datanum");
        if (null != urlParams && dataNum > 0){
            String token = (String)urlParams.get("token");
            String ext = (String)urlParams.get("ext");
            String page = (String)urlParams.get("page");
            int pageNo = Integer.parseInt(page) + 1;
            // Hard cap on pagination depth to avoid crawling the feed forever.
            if (pageNo < 200){
                String sportsUrl = String.format(sportsUrlFormat,token,ext,pageNo);
                CrawlerRequestRecord sportsRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(sportsUrl)
                        .recordKey(sportsUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .build();
                parsedLinks.add(sportsRecord);
            }

            JSONArray itemNodes = pageObject.getJSONArray("data");
            for (Object itemNode : itemNodes) {
                JSONObject dataObj = (JSONObject)itemNode;
                String articleUrl = dataObj.getString("vurl");
                String pubTime = dataObj.getString("publish_time");
                try {
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(articleUrl)
                            .recordKey(articleUrl)
                            .releaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime())
                            .copyBizTags()
                            .needParsed(true)
                            .needWashed(false)
                            .resultLabelTag(article)
                            .resultLabelTag(interaction)
                            .build();
                    itemRecord.getHttpRequest().addHeader("User-Agent",getRandomUA());
                    parsedLinks.add(itemRecord);
                } catch (Exception e) {
                    // FIX: the original call had the arguments reversed
                    // (logger.error(e.getMessage(), "…")), which logged the raw
                    // exception message as the pattern and dropped the stack trace.
                    logger.error("parse sports list has exception", e);
                }
            }
        }
        return parsedLinks;
    }

    /**
     * Schedules the next comment page when the current comment response
     * reports that more data is available (cursor-based pagination).
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        crawlerRequestRecord.setNeedWashPage(true);
        JSONObject data = JSONObject.parseObject(httpPage.getRawText()).getJSONObject("data");
        if (!data.getBooleanValue("hasnext")) {
            return parsedLinks;
        }
        // "last" is the server-side cursor pointing at the next comment page.
        String nextUrl = String.format(commentUrlFormat, data.getString("targetid"), data.getString("last"));
        CrawlerRequestRecord nextRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .recordKey(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .resultLabelTag(comment)
                .resultLabelTag(interaction)
                .notFilterRecord()
                .build();
        // Carry the article context (articleId / articleUrl) over to the next page.
        nextRecord.getHttpRequest().setExtras(copyExtras(crawlerRequestRecord.getHttpRequest().getExtras()));
        parsedLinks.add(nextRecord);
        return parsedLinks;
    }

    /**
     * Handles an article page. If the page is a client-side redirect stub it
     * schedules the new /rain/a/ URL instead; otherwise it extracts the
     * article/comment ids from the inline metadata JSON, schedules the
     * comment-count lookup, and — when the schedule asks for comments —
     * schedules the first comment page carrying the filter info.
     */
    private List<CrawlerRequestRecord> parseArticleLink(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Detect whether the page is a JS redirect stub rather than real content.
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        String rawText = httpPage.getRawText();
        if (rawText.contains("location.href='/rain/a/'+result[1]")){
            logger.info("article url has been turn to a new uri");
            // The article key is the filename part of the old URL, minus extension.
            String articleKey = httpRequestUrl.substring(httpRequestUrl.lastIndexOf("/") + 1).split("\\.")[0];
            String newUrl = String.format(newUrlFormat,articleKey);
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(newUrl)
                    .recordKey(newUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .copyBizTags()
                    .needParsed(true)
                    .needWashed(false)
                    .resultLabelTag(article)
                    .resultLabelTag(interaction)
                    .build();
            itemRecord.getHttpRequest().addHeader("User-Agent",getRandomUA());
            parsedLinks.add(itemRecord);
            return parsedLinks;
        }
        crawlerRequestRecord.setNeedWashPage(true);
        // Locate the inline metadata JSON object (contains "article_id") in the page source.
        Matcher mtInfo = Pattern.compile("\\{([^;])*\"article_id\"([^;])*?\\}").matcher(rawText);
        String articleInfo = "";
        if (mtInfo.find()){ // FIX: while-loop with immediate break reduced to a single find
            articleInfo = mtInfo.group(0);
        }
        try {
            JSONObject infoObj = JSONObject.parseObject(articleInfo);
            String articleId = infoObj.getString("article_id");
            crawlerRequestRecord.getHttpRequest().addExtra("articleId",articleId);
            crawlerRequestRecord.getHttpRequest().addExtra("articleUrl",httpRequestUrl);

            // Comment-count lookup runs as an internal download (see afterInternalDownload).
            String commentId = infoObj.getString("comment_id");
            String commentsNumUrl = String.format(commentsNumUrlFormat,commentId);
            CrawlerRequestRecord commentsNumRecord = CrawlerRequestRecord.builder()
                    .startPageRequest(domain(), CrawlerEnum.CrawlerRequestType.internalDownload)
                    .httpUrl(commentsNumUrl)
                    .recordKey(commentsNumUrl)
                    .build();
            parsedLinks.add(commentsNumRecord);

            // Only crawl comments when the schedule labels this record for them.
            CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
            if (categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null) {
                if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
                    logger.error("tenxun news crawler comment need to filter information!");
                    return parsedLinks;
                }
                KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(),CrawlerRecord.class);
                String commentUrl = String.format(commentUrlFormat, commentId, 0);
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .recordKey(commentUrl)
                        .releaseTime(System.currentTimeMillis())
                        .resultLabelTag(comment)
                        .resultLabelTag(interaction)
                        .notFilterRecord()
                        .copyBizTags()
                        .build();
                commentRecord.setFilter(filterInfoRecord.getFilter());
                commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                commentRecord.getHttpRequest().addExtra("articleId",articleId);
                commentRecord.getHttpRequest().addExtra("articleUrl",httpRequestUrl);
                parsedLinks.add(commentRecord);
            }

        } catch (Exception e) {
            // FIX: previously logged only a bare message; keep the exception so
            // the cause and stack trace are not silently discarded.
            logger.error("parse page info failed", e);
            return parsedLinks;
        }

        return parsedLinks;
    }

    /**
     * Extracts article links from a channel list response and schedules the
     * next list page (offset + limit) while the offset stays below 200.
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> urlParams = getUrlParams(httpRequestUrl);
        if (null ==urlParams){
            logger.error("parse list url params has error with : {}",httpRequestUrl);
            return parsedLinks;
        }
        String sub_srv_id = (String) urlParams.get("sub_srv_id");
        String offset = (String) urlParams.get("offset");
        String limit = (String) urlParams.get("limit");
        // Advance the paging cursor; the list URLs always carry numeric offset/limit.
        offset = String.valueOf(Integer.parseInt(offset) + Integer.parseInt(limit));
        if(Integer.parseInt(offset) < 200){
            String nextPageUrl = listPageUrlFormat.replace("#sub_srv_id",sub_srv_id).replace("#offset",offset);
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(nextPageRecord);
        }

        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        JSONArray objects = pageObj.getJSONObject("data").getJSONArray("list");
        for (Object obj : objects) {
            JSONObject dataObj = (JSONObject)obj;
            String url = dataObj.getString("url");
            String publishTime = dataObj.getString("publish_time");
            try {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(url)
                        .recordKey(url)
                        .releaseTime(DateUtils.parseDate(publishTime,"yyyy-MM-dd HH:mm:ss").getTime())
                        .copyBizTags()
                        .needParsed(true)
                        .needWashed(false)
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build();
                itemRecord.getHttpRequest().addHeader("User-Agent",getRandomUA());
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                // FIX: log message + exception instead of the bare message, so the
                // offending publish_time value and stack trace are preserved.
                logger.error("parse list item publish_time failed: {}", publishTime, e);
            }
        }
        return parsedLinks;
    }

    /**
     * Folds the results of auxiliary (internal) downloads back onto the main
     * record: on any failed download the main record is re-queued unfiltered;
     * comment-count responses are parsed into the "comments" extra, falling
     * back to "0" when the payload cannot be read.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        for (CrawlerRequestRecord downloadRecord : internalDownloadRecords) {
            HttpPage downloadPage = downloadRecord.getInternalDownloadPage();
            if (!downloadPage.isDownloadSuccess()) {
                // Re-queue the main record for retry and skip washing this round.
                crawlerRecord.setNeedWashPage(false);
                crawlerRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                links.add(crawlerRecord);
                break;
            }
            String downloadUrl = downloadRecord.getHttpRequest().getUrl();
            if (downloadUrl.matches(commentsNumUrlRegex)) {
                String commentCount = "0";
                try {
                    commentCount = JSONObject.parseObject(downloadPage.getRawText())
                            .getJSONObject("data")
                            .getString("commentnum");
                } catch (Exception ignored) {
                    // Best effort: keep the "0" fallback when the payload is malformed.
                }
                crawlerRecord.getHttpRequest().addExtra("comments", commentCount);
            }
        }
    }

    /**
     * Extracts structured result data from a downloaded page.
     *
     * For article pages: builds an article record (kafka pipeline), a dedupe
     * record (redis pipeline) and optionally an interaction record. For comment
     * pages: builds one comment + optional interaction record per top-level
     * comment, plus records for any embedded replies.
     *
     * @return the list of CrawlerData produced; empty when the page is blank
     *         or its metadata cannot be located
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String requestUrl = httpRequest.getUrl();
        String articleId = (String) extras.get("articleId");
        String articleUrl = (String) extras.get("articleUrl");
        String rawText = httpPage.getRawText();
        Html html = httpPage.getHtml();
        if (StringUtils.isBlank(rawText)){
            logger.error("httpPage is empty !");
            return crawlerDataList;
        }
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)){
            // The page embeds its metadata as an inline JSON object containing "article_id".
            Matcher mtInfo = Pattern.compile("\\{([^;])*\"article_id\"([^;])*?\\}").matcher(rawText);
            String articleInfo = "";
            if (mtInfo.find()){ // FIX: while-loop with immediate break reduced to a single find
                articleInfo = mtInfo.group(0);
            }
            JSONObject infoObj = JSONObject.parseObject(articleInfo);
            if (infoObj == null){
                // FIX: previously an uncaught NPE escaped washPage when the metadata
                // block was missing; fail soft with a diagnostic instead.
                logger.error("article meta json not found in page : {}", requestUrl);
                return crawlerDataList;
            }
            String title = infoObj.getString("title");
            String author = infoObj.getString("media");
            String authorId = infoObj.getString("media_id");
            String tags = infoObj.getString("tags");
            List<String> contents = html.xpath("//div[@class=\"content-article\"]//p//text()").all();
            // FIX: StringBuffer -> StringBuilder; no cross-thread use, so the
            // synchronized buffer was pure overhead.
            StringBuilder sbContent = new StringBuilder();
            for (String content : contents) {
                sbContent.append(content.trim());
            }
            List<String> images = html.xpath("//div[@class=\"content-article\"]//img//@src").all();
            StringBuilder sbImage = new StringBuilder();
            for (String image : images) {
                sbImage.append(image).append("\\x01");
            }
            // FIX: guard against a missing "tags" field (previously an NPE on split).
            List<String> allTags = StringUtils.isBlank(tags) ? Collections.emptyList() : Arrays.asList(tags.split(","));
            String comments = (String) extras.get("comments");
            CrawlerData crawlerAData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(articleUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .dataId(StringUtils.joinWith("-",domain(),site,article,articleId))
                    .addContentKV(Field_Title,title)
                    .addContentKV(Field_Author,author)
                    .addContentKV(Field_Author_Id,authorId)
                    .addContentKV(Field_Content,sbContent.toString())
                    .addContentKV(Field_Images,sbImage.toString())
                    .resultLabelTag(article)
                    .flowInPipelineTag("result_kafka")
                    .build();
            crawlerAData.setFilterPipelineResult(true);
            crawlerAData.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type,allTags);
            crawlerDataList.add(crawlerAData);

            // Dedupe/bookkeeping record flowing to redis.
            CrawlerData crawlerRedisResultData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(httpRequest.getUrl())
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleId))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_Urls,crawlerRequestRecord.getHttpRequest().getUrl())
                    .addContentKV("releaseTime",String.valueOf(crawlerRequestRecord.getReleaseTime()))
                    .addContentKV("site_info",crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(Tag_Site_Info))
                    .resultLabelTag(article)
                    .flowInPipelineTag("result_redis")
                    .build();
            crawlerRedisResultData.setFilterPipelineResult(true);
            crawlerDataList.add(crawlerRedisResultData);

            if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
                CrawlerData crawlerIData = CrawlerData.builder()
                        .data(crawlerRequestRecord,httpPage)
                        .url(articleUrl)
                        .releaseTime(System.currentTimeMillis())
                        .dataId(StringUtils.joinWith("-",domain(),site,interaction,articleId))
                        .parentId(StringUtils.joinWith("-",domain(),site,article,articleId))
                        .addContentKV(Field_I_Comments,comments)
                        .resultLabelTag(interaction)
                        .flowInPipelineTag("result_kafka")
                        .build();
                crawlerIData.setFilterPipelineResult(true);
                crawlerDataList.add(crawlerIData);
            }

        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)){
            JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
            JSONObject dataObj = pageObj.getJSONObject("data");
            // Comment list, reply list and user lookup table from the comment API payload.
            JSONArray commentsLists = dataObj.getJSONArray("oriCommList");
            JSONObject repCommList = dataObj.getJSONObject("repCommList");
            JSONObject userList = dataObj.getJSONObject("userList");
            for (Object commentsList : commentsLists) {
                JSONObject commentObj = (JSONObject)commentsList;
                String commentId = commentObj.getString("id");
                String authorId= commentObj.getString("userid");
                String author = getAuthorName(authorId,userList);
                // API times are epoch seconds; append "000" to get milliseconds.
                String time = commentObj.getString("time") + "000";
                String content = commentObj.getString("content");
                String likes = commentObj.getString("up");
                String comments = commentObj.getString("repnum");

                CrawlerData crawlerCData = CrawlerData.builder()
                        .data(crawlerRequestRecord,httpPage)
                        .url(articleUrl)
                        // FIX: deprecated new Long(...) boxing constructor replaced.
                        .releaseTime(Long.parseLong(time))
                        .dataId(StringUtils.joinWith("-",domain(),site,comment,commentId))
                        .parentId(StringUtils.joinWith("-",domain(),site,article,articleId))
                        .addContentKV(Field_Content,content)
                        .addContentKV(Field_Author,author)
                        .addContentKV(Field_Author_Id,authorId)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .resultLabelTag(comment)
                        .flowInPipelineTag("result_kafka")
                        .build();
                crawlerCData.setFilterPipelineResult(true);
                crawlerDataList.add(crawlerCData);

                if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
                    CrawlerData crawlerIData = CrawlerData.builder()
                            .data(crawlerRequestRecord,httpPage)
                            .url(articleUrl)
                            .releaseTime(Long.parseLong(time))
                            .dataId(StringUtils.joinWith("-",domain(),site,interaction,commentId))
                            .parentId(StringUtils.joinWith("-",domain(),site,comment,commentId))
                            .addContentKV(Field_I_Likes,likes)
                            .addContentKV(Field_I_Comments,comments)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .resultLabelTag(interaction)
                            .flowInPipelineTag("result_kafka")
                            .build();
                    crawlerIData.setFilterPipelineResult(true);
                    crawlerDataList.add(crawlerIData);
                }

                if (Integer.parseInt(comments) > 0){
                    JSONArray replies = repCommList.getJSONArray(commentId);
                    for (Object reply : replies) {
                        JSONObject replyObj = (JSONObject)reply;
                        crawlerDataList.addAll(getRepliesDatas(crawlerRequestRecord,httpPage,replyObj,userList,articleUrl,articleId,site));
                    }

                }

            }
        }
        return crawlerDataList;
    }

    /**
     * Builds the comment + optional interaction CrawlerData records for a
     * single reply object from the comment API's repCommList.
     */
    private List<CrawlerData> getRepliesDatas(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, JSONObject commentObj, JSONObject userList, String articleUrl, String articleId, String site) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String commentId = commentObj.getString("id");
        String authorId= commentObj.getString("userid");
        String author = getAuthorName(authorId,userList);
        // API times are epoch seconds; append "000" to get milliseconds.
        String time = commentObj.getString("time") + "000";
        String content = commentObj.getString("content");
        String likes = commentObj.getString("up");
        String comments = commentObj.getString("repnum");

        CrawlerData crawlerCData = CrawlerData.builder()
                .data(crawlerRequestRecord,httpPage)
                .url(articleUrl)
                // FIX: deprecated new Long(...) boxing constructor replaced.
                .releaseTime(Long.parseLong(time))
                .dataId(StringUtils.joinWith("-",domain(),site,comment,commentId))
                .parentId(StringUtils.joinWith("-",domain(),site,article,articleId))
                .addContentKV(Field_Content,content)
                .addContentKV(Field_Author,author)
                .addContentKV(Field_Author_Id,authorId)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                .resultLabelTag(comment)
                .flowInPipelineTag("result_kafka")
                .build();
        crawlerCData.setFilterPipelineResult(true);
        crawlerDataList.add(crawlerCData);

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
            CrawlerData crawlerIData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(articleUrl)
                    .releaseTime(Long.parseLong(time))
                    .dataId(StringUtils.joinWith("-",domain(),site,interaction,commentId))
                    .parentId(StringUtils.joinWith("-",domain(),site,comment,commentId))
                    .addContentKV(Field_I_Likes,likes)
                    .addContentKV(Field_I_Comments,comments)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .resultLabelTag(interaction)
                    .flowInPipelineTag("result_kafka")
                    .build();
            crawlerIData.setFilterPipelineResult(true);
            crawlerDataList.add(crawlerIData);
        }
        return crawlerDataList;
    }

    /**
     * Resolves a commenter's display name from the user lookup table.
     *
     * @param uid      user id key into {@code userList}
     * @param userList map of uid -> user object, as returned by the comment API
     * @return the user's "nick" field, or empty string when the uid is unknown
     */
    private String getAuthorName(String uid, JSONObject userList){
        JSONObject user = userList.getJSONObject(uid);
        return (user == null) ? "" : user.getString("nick");
    }

    /**
     * Registers every URL pattern this script is willing to crawl.
     * Registration order matches the original declaration order.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {
                indexUrlRegex,
                listPageUrlRegex,
                newUrlRegex,
                articleUrlRegex,
                commentUrlRegex,
                sportsUrlRegex
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Decides whether this script should handle the given record by comparing
     * the record's "site" category tag against this script's configured site.
     *
     * @param crawlerRequestRecord incoming record to test
     * @return true when the record's site matches {@code scriptSite} (case-insensitive)
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // StringUtils.equalsIgnoreCase is null-safe; the original called
        // crawlerSite.equalsIgnoreCase(...) and threw NPE when the record
        // carried no "site" tag. A missing tag now simply fails the check.
        return StringUtils.equalsIgnoreCase(crawlerSite, scriptSite);
    }

    /**
     * Post-execution hook from {@code CrawlerCommonScript}.
     * Intentionally a no-op here — this script has no per-record cleanup.
     * NOTE(review): presumably the framework requires the override; confirm
     * nothing was meant to be flushed or logged after each record.
     */
    @Override
    public void afterExecute(CrawlerRecordContext context) {

    }

    /**
     * Domain key identifying this crawler's source; also used as the first
     * segment of dataId/parentId values built elsewhere in this script.
     *
     * @return the constant domain name "tenxun"
     */
    @Override
    public String domain() {
        return "tenxun";
    }


    /**
     * Validates that a download produced a usable page.
     * Note the inverted convention: returns {@code true} when the page is BAD
     * (non-200 status, failed download, or blank body) and logs the reason.
     *
     * @param crawlerRequestRecord record carrying the request that was downloaded
     * @param httpPage             the downloaded page to inspect
     * @return true when the page failed any check, false when it is usable
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        int status = httpPage.getStatusCode();
        if (status != 200){
            logger.error("download page {} error, status code is {}", requestUrl, status);
            return true;
        }

        boolean downloaded = httpPage.isDownloadSuccess();
        if (!downloaded){
            logger.error("download page failed, check your link {}", requestUrl);
            return true;
        }

        boolean blankBody = StringUtils.isBlank(httpPage.getRawText());
        if (blankBody){
            logger.error("download page empty, check your link {}", requestUrl);
            return true;
        }

        return false;
    }

    /**
     * Parses a URL's query string into a key/value map.
     * A bare query string (no '?') is also accepted, matching the original
     * fallback behavior.
     *
     * @param url e.g. http://*.*.com?aa=11&bb=22&cc=33
     * @return map of query parameters; {@code null} when there is no query
     *         (null return preserved — existing callers may rely on it)
     */
    private Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<String, Object>(0);
        String param = url;
        if (url.contains("?")){
            // substring instead of split: split("\\?")[1] threw
            // ArrayIndexOutOfBoundsException for a URL ending in '?'
            // because split drops trailing empty strings.
            param = url.substring(url.indexOf('?') + 1);
        }
        if (StringUtils.isBlank(param)) {
            return null;
        }
        String[] params = param.split("&");
        for (String s : params) {
            // limit 2: keep values that themselves contain '=' (e.g. tokens,
            // base64 padding). The unlimited split silently dropped such pairs.
            String[] p = s.split("=", 2);
            if (p.length == 2) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

    /**
     * Shallow-copies an extras map into a fresh mutable {@link HashMap}.
     * Keys and values are shared with the input (shallow copy), exactly as
     * the original entry-by-entry loop behaved.
     *
     * @param inExtras source map; must not be null
     * @return new independent HashMap containing the same entries
     */
    public static Map<String, Object> copyExtras(Map<String,Object> inExtras){
        // HashMap's copy constructor replaces the hand-rolled entrySet loop.
        return new HashMap<>(inExtras);
    }

    /**
     * Casts the elements of an untyped object into a typed list.
     *
     * @param obj   candidate object; anything other than a List yields null
     * @param clazz element type to cast each entry to
     * @param <T>   target element type
     * @return new list of cast elements, or null when obj is not a List
     * @throws ClassCastException if any element is not an instance of clazz
     */
    public static <T> List<T> castList(Object obj, Class<T> clazz) {
        if (!(obj instanceof List<?>)) {
            return null;
        }
        List<T> typed = new ArrayList<T>();
        for (Object element : (List<?>) obj) {
            typed.add(clazz.cast(element));
        }
        return typed;
    }

    private static List<String> agentList = new ArrayList<>();

    static {
        agentList.add("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko");
        agentList.add("Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)");
        agentList.add("Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.28.3 (KHTML, like Gecko) Version/3.2.3 ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/525.28.3");
        agentList.add("Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16");
        agentList.add("Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14");
        agentList.add("Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14");
        agentList.add("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14");
        agentList.add("Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02");
        agentList.add("Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0");
        agentList.add("Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.13 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400");
    }

    /**
     * Picks a random User-Agent string from {@link #agentList}.
     * <p>
     * Bug fix: commons-lang3 {@code RandomUtils.nextInt(start, end)} is
     * EXCLUSIVE of {@code end}, so the original {@code size() - 1} bound meant
     * the last entry of the list could never be selected. Passing
     * {@code size()} covers every valid index 0..size-1.
     *
     * @return one of the pooled User-Agent strings, chosen uniformly
     */
    private static String getRandomUA(){
        return agentList.get(RandomUtils.nextInt(0, agentList.size()));
    }
}
