package com.chance.cc.crawler.development.scripts.sougou.searchKw;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Tag_Field_Topic_Type;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/4/19 16:29
 * @Description Sogou Wenwen (搜狗问问) crawler — keyword search performed via the site's top search bar.
 **/
public class SouGouSearchKwCrawlerScript extends CrawlerCommonScript {

    private static Logger log = LoggerFactory.getLogger(SouGouSearchKwCrawlerScript.class);
    private static final String DOMAIN = "sougou";
    private static final String SITE = "searchKw";
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";

    private static final String PREFIX = "https://www.sogou.com/sogou";
    private static final String ENTRANCE_URL = "https://www.sogou.com/";
    private static final String SEARCH_URL = "http[s]*://www.sogou.com/sogou\\?\\S*query=\\S*";
    private static final String ITEM_URL = "http[s]*://wenwen.sogou.com/z/q\\d+.htm";
    private static final String COMMENT_PREFIX = "https://wenwen.sogou.com";
    private static final String COMMENT_URL = "http[s]*://wenwen.sogou.com/z/q\\d+.htm\\?pg=\\d+\\S*";


    /**
     * Domain identifier routing requests to this script.
     *
     * @return the crawler domain constant for Sogou
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Registers the URL regular expressions (entrance, search-result, question
     * detail and comment pages) that admit a request into this script.
     */
    @Override
    public void initUrlRegulars() {
        for (String regular : new String[]{ENTRANCE_URL, SEARCH_URL, ITEM_URL, COMMENT_URL}) {
            addUrlRegular(regular);
        }
    }

    /**
     * Input check: only records whose "site" business tag equals {@link #SITE}
     * are processed by this script.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return true when the record belongs to the searchKw site
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String siteTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        // Constant-first equals: the original siteTag.equals(SITE) threw an NPE
        // when the "site" tag was absent.
        return SITE.equals(siteTag);
    }

    /**
     * Builds the initial crawl requests: every support-source record whose URL
     * contains "keys" is expanded into one search request per keyword.
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> records = new ArrayList<>();
        for (CrawlerRequestRecord sourceRecord : supportSourceRecords) {
            if (sourceRecord.getHttpRequest().getUrl().contains("keys")) {
                initKeyword(requestRecord, sourceRecord, records);
            }
        }
        return records;
    }

    /**
     * Link-extraction entry point. Failed downloads and HTTP 503 responses are
     * retried; other non-200 responses are dropped; otherwise the page is
     * dispatched by URL type (search-result list, question detail, comments).
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return follow-up requests discovered on the page
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() == 503) {
            log.error(DOMAIN + " page download error!will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        // 503 already returned above, so only a non-200 status remains to reject.
        if (httpPage.getStatusCode() != 200) {
            log.error("{} unexpected status code {}", DOMAIN, httpPage.getStatusCode());
            return parsedLinks;
        }

        String requestUrl = httpPage.getRequest().getUrl();
        if (requestUrl.matches(SEARCH_URL)) {
            searchUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(ITEM_URL)) {
            itemUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        if (requestUrl.matches(COMMENT_URL)) {
            commentUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Parses a search-result page: schedules the next result page (when a
     * "next page" link exists) and one item (question detail) request per
     * result entry.
     * <p>
     * Each entry's real target URL is carried in the "url" query parameter of
     * the redirect link; answer/like counts and the release time are parsed
     * from the "str_time" summary line when present.
     */
    private void searchUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String keyword = (String) httpPage.getRequest().getExtras().get("keyword");

        // Pagination: follow the "下一页" (next page) link when present.
        String nextUrl = httpPage.getHtml().xpath("//a[text()='下一页']/@href").get();
        if (StringUtils.isNotBlank(nextUrl)) {
            nextUrl = PREFIX + StringEscapeUtils.unescapeHtml(nextUrl);
            CrawlerRequestRecord nextRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            nextRecord.getHttpRequest().setExtras(httpPage.getRequest().getExtras());
            parsedLinks.add(nextRecord);
        }

        // Result-list entries.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"results\"]/div").nodes();
        for (Selectable node : nodes) {
            String itemUrl = node.xpath(".//div[@class=\"fb\"]/a/@href").get();
            // The target URL is packed into the link's query string; a link
            // without one previously threw ArrayIndexOutOfBoundsException on
            // split("\\?")[1] — skip such links instead.
            if (StringUtils.isBlank(itemUrl) || !itemUrl.contains("?")) {
                continue;
            }

            String url = "";
            List<NameValuePair> parse = URLEncodedUtils.parse(itemUrl.split("\\?")[1], Charset.defaultCharset());
            for (NameValuePair nameValuePair : parse) {
                if ("url".equals(nameValuePair.getName())) {
                    url = nameValuePair.getValue();
                    break;
                }
            }
            if (StringUtils.isBlank(url)) {
                continue;
            }

            // Interaction summary, e.g. "3个答案 - 提问时间: 2021-04-01 - 5个赞".
            String releaseTime = "";
            String comments = "0";
            String likes = "0";
            String interaction = node.xpath(".//p[@class=\"str_time\"]").get();
            if (StringUtils.isNotBlank(interaction)) {
                // split(" - ") on a string without the separator yields the whole
                // string as the single element, so one loop covers both the
                // multi-field and the single-field layout.
                for (String data : interaction.split(" - ")) {
                    if (data.contains("个答案")) {
                        comments = data.split("个")[0].trim();
                    }
                    if (data.contains("提问时间: ")) {
                        releaseTime = data.split(": ")[1].trim();
                    }
                    if (data.contains("个赞")) {
                        likes = data.split("个")[0].trim();
                    }
                }
            }

            try {
                long releaseTimeToLong = StringUtils.isBlank(releaseTime) ? System.currentTimeMillis() : DateUtils.parseDate(releaseTime, "yyyy-MM-dd").getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(url)
                        .recordKey(url + keyword)
                        .releaseTime(releaseTimeToLong)
                        .copyBizTags()
                        .needWashed(false)
                        .copyResultTags()
                        .build();
                itemRecord.getHttpRequest().addExtra("comments", comments);
                itemRecord.getHttpRequest().addExtra("likes", likes);
                // "getTime" == true: the list carried no date, so the detail page
                // must supply the earliest answer time.
                itemRecord.tagsCreator().bizTags().addCustomKV("getTime", StringUtils.isBlank(releaseTime));
                itemRecord.setTurnPageFilterInfo(null);
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                // Log with the throwable so the stack trace is not lost.
                log.error("parse release time [" + releaseTime + "] failed", e);
            }
        }
    }

    /**
     * Parses a question detail page.
     * <p>
     * When the "getTime" business tag is set, the earliest answer timestamp on
     * the page becomes the record's release time and is checked against any
     * date-range filter before washing is enabled. When comment results are
     * requested and the question has answers, a comment-page request is
     * scheduled (starting from the last page, falling back to pg=0).
     */
    private void itemUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // TRUE.equals guards against a missing "getTime" tag; the original
        // Boolean unboxing threw an NPE in that case.
        boolean getTime = Boolean.TRUE.equals(
                crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagObjVal("getTime", boolean.class));

        // Answer timestamp lines; an empty page indicates a bad download — retry.
        List<String> all = httpPage.getHtml().xpath("//div[@class=\"user-txt\"]/text()").all();
        if (all == null || all.size() < 1) {
            log.error("item url is erro!will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            return;
        }

        if (getTime) {
            try {
                // Earliest "yyyy-MM-dd ...回答" timestamp across all answers.
                long releaseTimeToLong = DateUtils.parseDate(all.get(0).split("回答")[0].trim(), "yyyy-MM-dd").getTime();
                for (String data : all) {
                    long time = DateUtils.parseDate(data.split("回答")[0].trim(), "yyyy-MM-dd").getTime();
                    if (time < releaseTimeToLong) {
                        releaseTimeToLong = time;
                    }
                }
                crawlerRequestRecord.setReleaseTime(releaseTimeToLong);

                CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
                if (filter == CrawlerEnum.CrawlerRecordFilter.keyOrDateRange || filter == CrawlerEnum.CrawlerRecordFilter.dateRange) {
                    Long startTime = null;
                    Long endTime = null;
                    for (FilterInfo filterInfo : crawlerRequestRecord.getFilterInfos()) {
                        if (filterInfo.getFilter() == CrawlerEnum.CrawlerRecordFilter.dateRange) {
                            long[] dateAllowRange = filterInfo.getDateAllowRange();
                            int hourFromNow = filterInfo.getHourFromNow();
                            if (dateAllowRange != null) {
                                startTime = dateAllowRange[0];
                                endTime = dateAllowRange[1];
                            } else if (hourFromNow != 0) {
                                endTime = System.currentTimeMillis();
                                startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
                            }
                        }
                    }
                    // Wash only when the release time falls inside the allowed range.
                    if (startTime != null && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime) {
                        crawlerRequestRecord.setNeedWashPage(true);
                    }
                } else {
                    crawlerRequestRecord.setNeedWashPage(true);
                }
            } catch (Exception e) {
                log.error("parse item release time failed", e);
            }
        } else {
            crawlerRequestRecord.setNeedWashPage(true);
        }

        if (!crawlerRequestRecord.isNeedWashPage()) {
            return;
        }

        CrawlerResultTags resultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (resultTags.hasDataType(comment)) {
            resultTags.getCategoryTag().removeLabelTag("comment");

            CrawlerRequestRecord filterRecord = resultTags.getCategoryTag().getKVTagObjVal("comment_filter_record", CrawlerRequestRecord.class);
            if (filterRecord == null) {
                log.error("filter record can not null !");
                return;
            }

            // Only questions with at least one answer get a comment crawl; a
            // missing "comments" extra is treated as zero (the original threw NPE).
            String commentsVal = (String) httpPage.getRequest().getExtras().get("comments");
            int comments = StringUtils.isBlank(commentsVal) ? 0 : Integer.parseInt(commentsVal);
            if (comments > 0) {
                // Prefer the last comment page (link just before "next"); fall back to pg=0.
                String commentUrl = httpPage.getHtml().xpath("//a[@class=\"btn-page-next next_page\"]/preceding-sibling::a[1]/@href").get();
                if (StringUtils.isBlank(commentUrl)) {
                    commentUrl = httpPage.getRequest().getUrl() + "?pg=0";
                } else {
                    commentUrl = COMMENT_PREFIX + commentUrl;
                }
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .needWashed(true)
                        .resultLabelTag(comment)
                        .build();
                commentRecord.setFilter(filterRecord.getFilter());
                commentRecord.setFilterInfos(filterRecord.getFilterInfos());
                parsedLinks.add(commentRecord);
            }
        }
    }

    /**
     * Parses an answer (comment) page: schedules an internal download of the
     * per-answer like/dislike counters for this question and follows the
     * previous-page link to walk the answer pages backwards.
     */
    private void commentUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String requestUrl = httpPage.getRequest().getUrl();

        // The question id sits between "/q" and ".htm" in the page URL.
        String key = requestUrl.substring(requestUrl.lastIndexOf("/q") + 2, requestUrl.lastIndexOf("."));
        String url = "https://wenwen.sogou.com/wapi/ms/zancai?qid=" + key;
        parsedLinks.add(CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .needParsed(false)
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build());

        String prevHref = httpPage.getHtml().xpath("//a[@class=\"btn-page-prev prev_page\"]/@href").get();
        if (StringUtils.isNotBlank(prevHref)) {
            String commentUrl = COMMENT_PREFIX + StringEscapeUtils.unescapeHtml(prevHref);
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .needWashed(true)
                    .copyResultTags()
                    .build());
        }
    }

    /**
     * Collects the like/dislike counters fetched by the internal "zancai"
     * download and stashes them on the request extras as "interactionMap"
     * (answerId -> {likes, unLikes}) for use while washing comments.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        Map<String, Map<String, String>> interactionMap = new HashMap<>();
        for (CrawlerRequestRecord downloadRecord : internalDownloadRecords) {
            HttpPage downloadPage = downloadRecord.getInternalDownloadPage();
            for (String data : downloadPage.getJson().jsonPath($_type + ".zancais").all()) {
                JSONObject jsonObject = JSONObject.parseObject(data);
                Map<String, String> counters = new HashMap<>();
                counters.put("likes", jsonObject.getString("zan"));
                counters.put("unLikes", jsonObject.getString("cai"));
                interactionMap.put(jsonObject.getString("answerId"), counters);
            }
        }
        Map<String, Object> extras = new HashMap<>();
        extras.put("interactionMap", interactionMap);
        crawlerRecord.getHttpRequest().setExtras(extras);
    }

    /**
     * Washes a downloaded page into result data according to the requested
     * data types: article, interaction and/or comments.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        CrawlerResultTags resultTags = crawlerRecord.tagsCreator().resultTags();
        List<CrawlerData> results = new ArrayList<>();
        if (resultTags.hasDataType(article)) {
            results.add(washArticle(crawlerRecord, page));
        }
        if (resultTags.hasDataType(interaction)) {
            results.add(washInteraction(crawlerRecord, page));
        }
        if (resultTags.hasDataType(comment)) {
            results.addAll(washComment(crawlerRecord, page));
        }
        return results;
    }

    /**
     * Washes the question (article) data: title, body text, images and topic
     * tags from the detail page.
     *
     * @return the article CrawlerData for this question
     */
    private CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String itemUrl = httpPage.getRequest().getUrl();
        // Question id between the last '/' and ".htm".
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        String title = httpPage.getHtml().xpath("//span[@class=\"detail-tit\"]/text()").get();
        List<String> textParts = httpPage.getHtml().xpath("//pre[@class=\"detail-tit-info\"]//text()").all();
        // StringBuilder: no synchronization needed for this local accumulator.
        StringBuilder contentBf = new StringBuilder();
        for (String s : textParts) {
            contentBf.append(s).append(" ");
        }
        List<String> imgs = httpPage.getHtml().xpath("//pre[@class=\"detail-tit-info\"]/img/@src").all();
        List<String> topics = httpPage.getHtml().xpath("//div[@class=\"tags\"]/a/text()").all();

        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                // Statically-imported constant instead of valueOf("article").
                .resultLabelTag(article)
                .url(itemUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_Title, title)
                .addContentKV(Field_Images, imgs.toString())
                .addContentKV(Field_Content, contentBf.toString())
                .build();
        if (topics != null && !topics.isEmpty()) {
            crawlerData.tagsCreator().bizTags().addCustomKV(Tag_Field_Topic_Type, topics);
        }
        return crawlerData;
    }

    /**
     * Washes the question-level interaction counters (answer count, likes)
     * carried over from the search-result page via request extras.
     *
     * @return the interaction CrawlerData, parented to the article record
     */
    private CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

        Map<String, Object> extras = httpPage.getRequest().getExtras();
        String comments = (String) extras.get("comments");
        String likes = (String) extras.get("likes");

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), articleKey))
                // Statically-imported constant instead of valueOf("interaction").
                .resultLabelTag(interaction)
                .url(itemUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_I_Comments, comments)
                .addContentKV(Field_I_Likes, likes)
                .build();
    }

    /**
     * Washes answer (comment) entries from a comment page. Each answer yields
     * a comment record plus a child interaction record whose like/dislike
     * counters come from the "interactionMap" extra gathered by
     * {@link #afterInternalDownload}.
     *
     * @return the comment and interaction CrawlerData entries
     */
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        String articleKey = requestUrl.substring(requestUrl.lastIndexOf("/") + 1, requestUrl.lastIndexOf("."));

        @SuppressWarnings("unchecked")
        Map<String, Map<String, String>> extras = (Map<String, Map<String, String>>) crawlerRequestRecord.getHttpRequest().getExtras().get("interactionMap");
        // The first comment page (pg=0) has a flatter DOM than later pages.
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"replay-wrap common_answers\"]/div[@class=\"replay-section answer_item\"]").nodes();
        if (requestUrl.contains("pg=0")) {
            nodes = httpPage.getHtml().xpath("//div[@class=\"replay-section answer_item\"]").nodes();
        }
        for (Selectable node : nodes) {
            String commentId = node.xpath("./@data-id").get();
            String author = node.xpath(".//a[@class=\"user-name\"]/text()|.//span[@class=\"user-name\"]/text()").get();
            // Entries without a timestamp cannot be dated; skip them.
            String releaseTime = node.xpath(".//div[@class=\"user-txt\"]/text()").get();
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }
            List<String> all = node.xpath(".//pre[@class=\"replay-info-txt answer_con\"]//text()").all();
            StringBuilder contentBf = new StringBuilder();
            for (String s : all) {
                contentBf.append(s).append(" ");
            }

            try {
                long releaseTimeToLong = DateUtils.parseDate(releaseTime.split("回答")[0].trim(), "yyyy-MM-dd").getTime();
                crawlerDataList.add(CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                        .resultLabelTag(comment)
                        .releaseTime(releaseTimeToLong)
                        .url(requestUrl)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_Author, author)
                        .addContentKV(Field_Content, contentBf.toString())
                        .build());

                // Per-answer like/dislike counters; default to "0" when missing.
                String likes = "0";
                String unLikes = "0";
                if (extras != null && extras.get(commentId) != null) {
                    Map<String, String> counters = extras.get(commentId);
                    likes = counters.get("likes");
                    unLikes = counters.get("unLikes");
                }
                crawlerDataList.add(CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), CrawlerEnum.CrawlerDataType.interaction.enumVal(), commentId))
                        .resultLabelTag(interaction)
                        .releaseTime(releaseTimeToLong)
                        .url(requestUrl)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .addContentKV(Field_I_Likes, likes)
                        .addContentKV(Field_I_Dislikes, unLikes)
                        .build());
            } catch (ParseException e) {
                // Log with the throwable so the stack trace is not lost.
                log.error("parse comment release time [" + releaseTime + "] failed", e);
            }
        }
        return crawlerDataList;
    }

    /**
     * No post-execution cleanup is required for this script; the hook is
     * intentionally empty.
     */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-schedules a failed request, up to 5 attempts. The retry count is
     * tracked via the {@code REQUEST_AGAIN_TAG} business tag and appended to
     * the record key so the retry is not deduplicated away. Requests tagged
     * "turn_page_item_request" are re-issued as item-page requests; everything
     * else goes out as an unfiltered turn-page request.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= 5) {
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        // Both branches assign via builder, so the original post-hoc null check
        // was unreachable and has been removed.
        CrawlerRequestRecord retryRecord;
        if (type == null) {
            retryRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            retryRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // Carry over request state so the retry behaves like the original request.
        retryRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        retryRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        retryRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        retryRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        retryRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(retryRecord);
    }

    /**
     * Expands a keyword-source support record into one search request per
     * keyword. The keyword list is read from the internal download's JSON
     * payload ("content" array, "keyword" field); the search URL template
     * comes from the {@code searchKwSourceUrl} extra on the driving record.
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        String searchSourceUrl = (String) extras.get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchSourceUrl)) {
            log.error("search kw source url can not null!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        for (String data : json.jsonPath($_type + ".content").all()) {
            String keyword = JSONObject.parseObject(data).getString("keyword");
            try {
                String url = String.format(searchSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(requestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                turnRecord.tagsCreator().bizTags().addKeywords(keyword);
                turnRecord.getHttpRequest().addExtra("keyword", keyword);
                crawlerRecords.add(turnRecord);
            } catch (UnsupportedEncodingException e) {
                // UTF-8 is always supported; log with the throwable just in case.
                log.error("encode keyword [" + keyword + "] failed", e);
            }
        }
    }

    /**
     * Ad-hoc manual check: unescapes and URL-decodes a sample snapshot URL.
     * Debug-only; not used by the crawler at runtime.
     */
    public static void main(String[] args) {
        String s = "http://snapshot.sogoucdn.com/websnapshot?ie=utf8&amp;url=http%3A%2F%2Fwenwen.sogou.com%2Fz%2Fq727375224.htm&amp;did=3c28af542f2d49f7-c53fd3988c057bf5-333d39bb8f03cf995ab29a913d231337&amp;k=ba510f17102adc2f1d0571563ef8de9d&amp;encodedQuery=%E8%80%90%E5%85%8B&amp;query=%E8%80%90%E5%85%8B&amp;&amp;w=01020400&amp;m=0&amp;st=1";
        // Unescape HTML entities once and reuse the result for both outputs
        // (the original unescaped the same string twice).
        String s1 = StringEscapeUtils.unescapeHtml(s);
        try {
            System.out.println(URLDecoder.decode(s1, "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is always supported; log instead of printStackTrace.
            log.error("decode sample url failed", e);
        }
        System.out.println(s1);
    }
}
