package com.chance.cc.crawler.development.scripts.iqiyi;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Field_Author_Follows;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/3/18 15:13
 * @Description 爱奇艺关键词采集脚本
 **/
public class IQiYiCrawlerScript extends CrawlerCommonScript {

    // static final: SLF4J loggers are per-class singletons and must never be reassigned.
    private static final Logger log = LoggerFactory.getLogger(IQiYiCrawlerScript.class);
    private static final String DOMAIN = "iqiyi";
    // Business "site" tag value that routes records into this script (see crawlerCheck).
    private static final String SITE = "searchKw";
    // Business tag carrying the retry counter for failed downloads.
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";
    // Extras key holding the search-URL template (with a %s keyword placeholder).
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";

    // NOTE(review): PRIFIX (sic) is never referenced in this file — candidate for removal.
    private static final String PRIFIX = "https://so.iqiyi.com";
    private static final String ENTRANCE_URL = "https://so.iqiyi.com/so";
    // Regex matching the search API responses (left-side card data).
    private static final String SEARCH_URL = "https://pcw-api.iqiyi.com/strategy/pcw/data/soBaseCardLeftSide\\S*";
    // Regex matching video detail pages.
    private static final String ITEM_URL = "http[s]*://www.iqiyi.com/v_[a-z0-9]*.html";
    // Like-count API template (%s = tvid) and the regex matching its responses.
    private static final String INTERACTION_LIKE_SOURCE_URL = "http://iface2.iqiyi.com/like/count?businessType=14&entityId=%s&qyid=63204618cb07f6722139214f3b31f1b0";
    private static final String INTERACTION_LIKE_URL = "http://iface2.iqiyi.com/like/count\\S*";
    // Play-count API template/regex. NOTE(review): declared but never requested in this file.
    private static final String INTERACTION_VIEWS_SOURCE_URL = "https://pcw-api.iqiyi.com/video/video/hotplaytimes/%s";
    private static final String INTERACTION_VIEWS_URL = "https://pcw-api.iqiyi.com/video/video/hotplaytimes/\\S*";
    // Comment-list API template (%s = tvid) and the regex matching its responses.
    private static final String COMMENT_SOURCE_URL = "https://sns-comment.iqiyi.com/v3/comment/get_comments.action?agent_type=118&channel_id=8&agent_version=9.11.5&authcookie=null&business_type=17&content_id=%s&hot_size=10&last_id=&page=1&page_size=10&types=hot,time";
    private static final String COMMENT_URL = "https://sns-comment.iqiyi.com/v3/comment/get_comments.action\\S*";

    /**
     * Declares which crawler domain this script handles.
     *
     * @return the domain identifier ("iqiyi")
     */
    @Override
    public String domain() {
        return IQiYiCrawlerScript.DOMAIN;
    }

    /**
     * Registers the URL patterns that route requests into this script:
     * search entrance, search API, video detail pages, and the comment API.
     */
    @Override
    public void initUrlRegulars() {
        // Registration order mirrors the crawl flow: entrance -> search -> item -> comment.
        for (String regular : new String[]{ENTRANCE_URL, SEARCH_URL, ITEM_URL, COMMENT_URL}) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input gate: only records whose business "site" tag equals {@code searchKw}
     * are processed by this script.
     *
     * @param crawlerRequestRecord incoming request record
     * @return true when the record belongs to the iQiyi keyword-search site
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String siteTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        // Constant-first comparison: a missing "site" tag now yields false instead of an NPE.
        return SITE.equals(siteTag);
    }

    /**
     * Builds the initial crawl requests: every support-source record whose URL
     * carries a "keys" parameter is expanded into per-keyword search requests.
     *
     * @param requestRecord        the driving request record
     * @param supportSourceRecords pre-downloaded support records (keyword lists)
     * @return the seeded search requests
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> seededRecords = new ArrayList<>();
        for (CrawlerRequestRecord sourceRecord : supportSourceRecords) {
            if (sourceRecord.getHttpRequest().getUrl().contains("keys")) {
                initKeyword(requestRecord, sourceRecord, seededRecords);
            }
        }
        return seededRecords;
    }

    /**
     * Dispatches a downloaded page to the parser matching its URL and collects
     * the follow-up requests produced. Failed downloads are re-queued for retry
     * instead of being parsed.
     *
     * @param crawlerRequestRecord record of the downloaded request
     * @param httpPage             the downloaded page
     * @return follow-up requests discovered on the page
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> links = new ArrayList<CrawlerRequestRecord>();
        String url = crawlerRequestRecord.getHttpRequest().getUrl();

        boolean downloadFailed = !httpPage.isDownloadSuccess() || httpPage.getStatusCode() != 200;
        if (downloadFailed) {
            log.error("{} status code : [{}]", url, httpPage.getStatusCode());
            requestAgainCrawlerRecord(links, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return links;
        }

        // The three patterns target distinct hosts/paths, so at most one branch runs.
        if (url.matches(SEARCH_URL)) {
            searchUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(ITEM_URL)) {
            itemUrlRecord(crawlerRequestRecord, httpPage, links);
        }
        if (url.matches(COMMENT_URL)) {
            commentUrlRecord(crawlerRequestRecord, httpPage, links);
        }

        return links;
    }

    /**
     * Parses a search-API response: schedules the next results page and one
     * item-page request per search hit. A missing/non-success response code
     * triggers a retry of the same URL.
     *
     * @param crawlerRequestRecord record for the search request
     * @param httpPage             downloaded search API response (JSON)
     * @param parsedLinks          output list of follow-up requests
     */
    private void searchUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String keyword = (String) httpPage.getRequest().getExtras().get("keyword");
        String requestUrl = httpPage.getRequest().getUrl();
        JSONObject jsonObject = JSONObject.parseObject(httpPage.getRawText());
        try {
            // "A00000" is iQiyi's success code; anything else is re-queued for retry.
            String code = jsonObject.getString("code");
            if (StringUtils.isBlank(code) || !code.equals("A00000")) {
                requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
                crawlerRequestRecord.setNeedWashPage(false);
                return;
            }
            int resultNum = jsonObject.getJSONObject("data").getJSONObject("formatData").getIntValue("resultNum");
            if (resultNum < 1) {
                log.error("search {} result num is 0!", keyword);
                return;
            }

            // Next results page: bump "pageNum" and re-encode "key" in the current URL.
            String nextUrl = getNextUrl(requestUrl, "key", "pageNum");
            if (StringUtils.isNotBlank(nextUrl)) {
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(nextUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                // Carry the keyword (and any other extras) forward to the next page.
                turnRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
                parsedLinks.add(turnRecord);
            }

            // One item-page request per search hit; recordKey includes the keyword
            // so the same video found via different keywords is crawled separately.
            JSONArray itemList = jsonObject.getJSONObject("data").getJSONObject("formatData").getJSONArray("list");
            for (Object o : itemList) {
                JSONObject itemObject = JSONObject.parseObject(o.toString());
                String itemUrl = itemObject.getString("g_main_link");
                String releaseTime = itemObject.getString("releaseTime");

                long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl + keyword)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                parsedLinks.add(itemRecord);
            }
        } catch (Exception e) {
            // Log with context and the full stack trace; e.getMessage() alone can be
            // null (e.g. for NPEs) and loses the failure location.
            log.error("parse search result failed, keyword [{}], url [{}]", keyword, requestUrl, e);
        }
    }


    /**
     * Handles a video detail page: extracts the tvid embedded in the raw HTML
     * and fans out (a) an internal-download request for the like count and
     * (b) when the result tags request comments, a comment-API request that
     * carries the article URL and the caller-supplied filter configuration.
     *
     * @param crawlerRequestRecord record for the detail-page request
     * @param httpPage             downloaded detail page
     * @param parsedLinks          output list of follow-up requests
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();
        // The page embeds "tvid=<digits>"; that id keys both the like and comment APIs.
        String tvId = washContent("tvid=\\d+", httpPage.getRawText());
        if (StringUtils.isNotBlank(tvId)) {
            //likes
            // split[1] is the numeric id (regex guarantees "tvid=" followed by digits).
            String[] split = tvId.split("=");
            String likesUrl = String.format(INTERACTION_LIKE_SOURCE_URL, split[1]);
            // Internal download: fetched alongside this record, consumed in afterInternalDownload.
            CrawlerRequestRecord likeRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(likesUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .notFilterRecord()
                    .needWashed(false)
                    .needParsed(false)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                    .build();
            parsedLinks.add(likeRecord);

            CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
            if(resultTags.hasDataType(comment)){
                // Remove the comment tag from this record: comments are produced by the
                // dedicated comment request below, not by this detail page.
                resultTags.getCategoryTag().removeLabelTag("comment");

                // The comment request inherits its filter setup from a record stashed
                // on the "comment_filter_record" tag by an upstream step.
                CrawlerRequestRecord filterRecord = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class);
                if(filterRecord == null){
                    log.error("filter record can not null !");
                    return;
                }

                String commentUrl = String.format(COMMENT_SOURCE_URL,split[1]);
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .needWashed(true)
                        .resultLabelTag(comment)
                        .build();
                // washComment needs the article URL to derive the parent data id.
                commentRecord.getHttpRequest().addExtra("articleUrl",requestUrl);
                commentRecord.setFilter(filterRecord.getFilter());
                commentRecord.setFilterInfos(filterRecord.getFilterInfos());
                parsedLinks.add(commentRecord);
            }
        } else {
            // No tvid found: record a zero like count so downstream washing still works.
            log.error("tvId is null!");
            crawlerRequestRecord.getHttpRequest().addExtra("likes", "0");
        }
    }

    /**
     * Handles a comment-API response: verifies the comment list is readable
     * (re-queuing the request on failure) and schedules the next comment page
     * by advancing the "page" parameter and setting "last_id" to the id of the
     * last comment on this page.
     *
     * @param crawlerRequestRecord record for the comment request
     * @param httpPage             downloaded comment API response (JSON)
     * @param parsedLinks          output list of follow-up requests
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks){
        // Evaluate the json path once (the original code evaluated it twice:
        // once as a probe inside try and once again afterwards).
        List<String> all;
        try{
            all = httpPage.getJson().jsonPath($_type + ".data.comments").all();
        }catch (Exception e){
            log.error("comment down load is error!will retry", e);
            requestAgainCrawlerRecord(parsedLinks,crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return;
        }
        if(all.size() < 1){
            // Empty page: no further comment pages to schedule.
            return;
        }

        // Cursor for the next page: id of the last comment on this page.
        String lastId = JSONObject.parseObject(all.get(all.size() - 1)).getString("id");
        // Rebuild the URL with page+1 and last_id=<cursor>; other params pass through.
        String requestUrl = httpPage.getRequest().getUrl();
        String[] split = requestUrl.split("\\?");
        String nextUrl = split[0] + "?";
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair nameValuePair : parse) {
            String name = nameValuePair.getName();
            String value = nameValuePair.getValue();
            if("last_id".equals(name)){
                nextUrl = nextUrl + name + "=" + lastId + "&" ;
            } else if("page".equals(name)){
                nextUrl = nextUrl + name + "=" + (Integer.parseInt(value) + 1) + "&";
            } else{
                nextUrl = nextUrl + name + "=" + value + "&";
            }
        }
        // Drop the trailing '&'.
        nextUrl = nextUrl.substring(0,nextUrl.length() - 1);
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .needWashed(true)
                .copyResultTags()
                .build();
        // Propagate extras (notably "articleUrl") so washComment keeps working.
        commentRecord.getHttpRequest().setExtras(crawlerRequestRecord.getHttpRequest().getExtras());
        parsedLinks.add(commentRecord);
    }

    /**
     * Harvests the results of internal downloads — currently only the
     * like-count API — and stashes the value on the main request's extras
     * for {@code washInteraction} to pick up.
     *
     * @param crawlerRecord           the main request record
     * @param internalDownloadRecords completed internal-download records
     * @param links                   (unused) additional links collector
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        String likes = "0";
        try {
            for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
                HttpPage page = internalDownloadRecord.getInternalDownloadPage();
                String url = page.getRequest().getUrl();

                if (url.matches(INTERACTION_LIKE_URL)) {
                    likes = page.getJson().jsonPath($_type + ".data").get();
                }
            }
        } catch (Exception e) {
            // Keep the default "0" on failure, but record the full stack trace.
            log.error("read like count from internal download failed", e);
        } finally {
            // Always propagate a value so washInteraction never sees a missing extra.
            crawlerRecord.getHttpRequest().addExtra("likes", likes);
        }
    }

    /**
     * Washes a downloaded page into result data, driven by the record's result
     * tags: article and interaction data come from the video detail page,
     * comment data from the comment-API response.
     *
     * @param crawlerRecord record for the washed request
     * @param page          the downloaded page
     * @return extracted data items (never contains null entries)
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        if (crawlerResultTags.hasDataType(article)) {
            CrawlerData articleData = washArticle(crawlerRecord, page);
            // washArticle returns null when the page's embedded video-info block is
            // missing; previously the null was added to the list and could NPE downstream.
            if (articleData != null) {
                crawlerDataList.add(articleData);
            }
        }

        if (crawlerResultTags.hasDataType(interaction)) {
            crawlerDataList.add(washInteraction(crawlerRecord, page));
        }

        if (crawlerResultTags.hasDataType(comment)) {
            crawlerDataList.addAll(washComment(crawlerRecord, page));
        }

        return crawlerDataList;
    }

    /**
     * Extracts article data (title, author, description, publish time) from the
     * JSON blob embedded in the detail page's {@code video-info} attribute.
     *
     * @param crawlerRequestRecord record for the detail-page request
     * @param httpPage             downloaded detail page
     * @return the article data, or null when the video-info block is absent
     */
    private CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // URLs look like .../v_<key>.html — the key between the last '_' and '.'.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("_") + 1, itemUrl.lastIndexOf("."));

        // Grab the raw video-info attribute; the trailing marker anchors the regex.
        String videoInfo = washContent("video-info=[\\S\\s]*\"isDiffDue\":true}'>", httpPage.getRawText());
        CrawlerData crawlerData = null;
        if(StringUtils.isNotBlank(videoInfo)){
            // Trim to the outermost JSON object before parsing.
            videoInfo = videoInfo.substring(videoInfo.indexOf("{"),videoInfo.lastIndexOf("}") + 1);
            JSONObject jsonObject = JSONObject.parseObject(videoInfo);
            String title = jsonObject.getString("name");
            JSONObject user = jsonObject.getJSONObject("user");
            String author = "";
            String authorId = "";
            String follows = "0";
            if(user != null){
               author = user.getString("name");
               authorId  = user.getString("id");
               follows = user.getString("followerCount");
            }
            // lastPublishTime is used as the article's release time and also written
            // back to the record so later washing steps see the corrected time.
            long releaseTimeToLong = jsonObject.getLongValue("lastPublishTime");
            crawlerRequestRecord.setReleaseTime(releaseTimeToLong);
            String content = jsonObject.getString("description");
            crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                    .url(itemUrl)
                    .releaseTime(releaseTimeToLong)
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Title, title)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Author_Follows,follows)
                    .build();
        }

        return crawlerData;
    }

    /**
     * Builds the interaction (likes / comment count) data record for a video
     * detail page. The like count was stashed in the request extras by
     * {@code afterInternalDownload}; the comment count is scraped from the HTML.
     *
     * @param crawlerRequestRecord record for the detail-page request
     * @param httpPage             downloaded detail page
     * @return the interaction data record
     */
    private CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String pageUrl = httpPage.getRequest().getUrl();
        String articleKey = pageUrl.substring(pageUrl.lastIndexOf("_") + 1, pageUrl.lastIndexOf("."));

        String likes = (String) httpPage.getRequest().getExtras().get("likes");
        String commentCount = httpPage.getHtml().xpath("//i[@class=\"qy-svgicon qy-svgicon-comment-v1\"]/following-sibling::span/text()").get();
        // The span shows the literal label "评论" when there are no comments yet.
        if ("评论".equals(commentCount)) {
            commentCount = "0";
        }

        return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("interaction"))
                    .url(pageUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_I_Comments, commentCount)
                    .addContentKV(Field_I_Likes, likes)
                    .build();
    }

    /**
     * Extracts comment data from a comment-API response: one record per
     * top-level comment and one per nested reply. Replies carry the parent
     * comment's floor on a business tag; all records are parented to the
     * article derived from the "articleUrl" extra.
     *
     * @param crawlerRequestRecord record for the comment request
     * @param httpPage             downloaded comment API response (JSON)
     * @return comment and reply data records
     */
    private List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        // "articleUrl" was stashed by itemUrlRecord; its key links comments to the article.
        String articleUrl = (String) httpPage.getRequest().getExtras().get("articleUrl");
        String articleKey = articleUrl.substring(articleUrl.lastIndexOf("_") + 1,articleUrl.lastIndexOf("."));

        List<String> commentList = httpPage.getJson().jsonPath($_type + ".data.comments").all();
        for (String data : commentList) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String commentId = jsonObject.getString("id");
            JSONObject userInfo = jsonObject.getJSONObject("userInfo");
            String author = userInfo.getString("uname");
            String authorId = userInfo.getString("uid");
            // addTime appears to be epoch seconds; appending "000" converts to millis.
            long timestamp = Long.parseLong(jsonObject.getString("addTime") + "000");
            String content = jsonObject.getString("content");
            String floor = jsonObject.getString("floor");
            String likes = jsonObject.getString("likes");
            String comments = jsonObject.getString("replyCount");

            // Nested replies become their own records, parented to the same article.
            JSONArray replies = jsonObject.getJSONArray("replies");
            if(replies != null){
                for (Object reply : replies) {
                    JSONObject replyJson = (JSONObject)reply;
                    String replyCommentId = replyJson.getString("id");
                    JSONObject replyUserInfo = replyJson.getJSONObject("userInfo");
                    String replyAuthor = replyUserInfo.getString("uname");
                    String replyAuthorId = replyUserInfo.getString("uid");
                    long replyTimestamp = Long.parseLong(replyJson.getString("addTime") + "000");
                    String replyContent = replyJson.getString("content");
                    String replyFloor = replyJson.getString("floor");
                    String replyLikes = replyJson.getString("likes");
                    String replyComments = replyJson.getString("replyCount");

                    CrawlerData crawlerData = CrawlerData.builder()
                            .data(crawlerRequestRecord, httpPage)
                            .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                            .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), replyCommentId))
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("comment"))
                            .releaseTime(replyTimestamp)
                            .url(requestUrl)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .addContentKV(Field_Author, replyAuthor)
                            .addContentKV(Field_Author_Id, replyAuthorId)
                            .addContentKV(Field_Content, replyContent)
                            .addContentKV(Field_Floor, replyFloor)
                            .addContentKV(Field_I_Likes, replyLikes)
                            .addContentKV(Field_I_Comments, replyComments)
                            .build();
                    // Record the parent comment's floor so the reply can be threaded.
                    crawlerData.tagsCreator().bizTags().addCustomKV(Field_Reply_Floor,floor);
                    crawlerDataList.add(crawlerData);
                }
            }

            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                    .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("comment"))
                    .releaseTime(timestamp)
                    .url(requestUrl)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .addContentKV(Field_Author, author)
                    .addContentKV(Field_Author_Id, authorId)
                    .addContentKV(Field_Content, content)
                    .addContentKV(Field_Floor, floor)
                    .addContentKV(Field_I_Likes, likes)
                    .addContentKV(Field_I_Comments, comments)
                    .build();
            crawlerDataList.add(crawlerData);
        }
        return crawlerDataList;
    }

    /**
     * Returns the first match of the given regex in the input, or null when
     * there is no match.
     *
     * @param regx  regular expression to search for
     * @param input text to scan
     * @return the first matched substring, or null if none
     */
    private static String washContent(String regx, String input) {
        // Original used while+return (a disguised if) plus an unused list; a plain
        // conditional is equivalent and clearer.
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group(0) : null;
    }

    /**
     * Post-execution hook; this script needs no per-context cleanup, so it is
     * intentionally a no-op.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-queues a failed request, up to 5 attempts. The retry count travels on
     * the {@code iqiyi_request_again} business tag; records labeled
     * "turn_page_item_request" are rebuilt as item-page requests, everything
     * else as turn-page requests. Extras, headers and wash/parse flags are
     * copied so the retry behaves like the original request.
     *
     * @param crawlerRequestRecords output list the retry record is appended to
     * @param crawlerRecord         the failed record to retry
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 5) {
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    // Append the count so retries get distinct record keys.
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // Both branches assign a record, so the old post-build null check was dead code.
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Expands a downloaded keyword-list page into one search request per
     * keyword, using the URL template from the "searchKwSourceUrl" extra
     * (with the keyword URL-encoded into its %s placeholder).
     *
     * @param requestRecord       the driving request record
     * @param supportSourceRecord support record whose internal download holds the keyword list
     * @param crawlerRecords      output list the seeded search requests are appended to
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        String searchSourceUrl = (String) extras.get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchSourceUrl)) {
            log.error("search kw source url can not null!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");

            try {
                String url = String.format(searchSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(requestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                // Tag the record with its keyword and carry it in the extras for parsing.
                turnRecord.tagsCreator().bizTags().addKeywords(keyword);
                turnRecord.getHttpRequest().addExtra("keyword",keyword);
                crawlerRecords.add(turnRecord);
            } catch (UnsupportedEncodingException e) {
                // Log with context and stack trace instead of just the (possibly null) message.
                log.error("encode keyword [{}] failed", keyword, e);
            }
        }
    }


    /**
     * 获取下一页的地址
     * @param requestUrl
     * @param keyword
     * @param page
     * @return
     */
    private String getNextUrl(String requestUrl, String keyword, String page) {
        String[] split = requestUrl.split("\\?");
        String nextUrl = split[0] + "?";
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        for (NameValuePair nameValuePair : parse) {
            String name = nameValuePair.getName();
            String value = nameValuePair.getValue();
            if (StringUtils.isNotBlank(page) && page.equals(name)) {
                nextUrl = nextUrl + name + "=" + (Integer.parseInt(value) + 1) + "&";
            } else if (StringUtils.isNotBlank(keyword) && keyword.equals(name)) {
                try {
                    nextUrl = nextUrl + name + "=" + URLEncoder.encode(value, "UTF-8") + "&";
                } catch (UnsupportedEncodingException e) {
//                    log.error(e.getMessage());
                }
            } else {
                nextUrl = nextUrl + name + "=" + value + "&";
            }
        }
        return nextUrl.substring(0, nextUrl.length() - 1);
    }
}
