package com.chance.cc.crawler.development.scripts.sohu;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @author lt
 * @version 1.0
 * @date 2021-03-25 14:07:42
 * @email okprog@sina.com
 */
public class SoHuNewsCrawlerScript extends CrawlerCommonScript {

    // Instance logger for this script.
    private Logger logger = LoggerFactory.getLogger(SoHuNewsCrawlerScript.class);

    // URL patterns used to recognize which crawl stage a finished request belongs to.
    public static final String indexUrlRegex = "https://www\\.sohu\\.com/";
    public static final String listPageUrlRegex = "https://v2\\.sohu\\.com/public-api/feed\\S*";
    public static final String articleUrlRegex = "https?://www\\.sohu\\.com/a/\\d*_\\d*";
    public static final String commentsNumUrlRegex = "https://apiv2\\.sohu\\.com/api/topic/load\\?page_size=1&source_id=mp_\\d*";
    public static final String commentUrlRegex = "https://apiv2\\.sohu\\.com/api/comment/list\\S*";
    public static final String viewsUrlRegex = "https://v2\\.sohu\\.com/public-api/articles/\\d*/pv";
    public static final String likesUrlRegex = "https://api\\.interaction\\.sohu.com/api/topic/like/count\\S*";

    // String.format templates for building the follow-up request URLs.
    public static final String listPageUrlFormat = "https://v2.sohu.com/public-api/feed?scene=CHANNEL&sceneId=%s&page=%s&size=20";
    public static final String articleUrlFormat = "https://www.sohu.com/a/%s_%s";
    public static final String commentsNumUrlFormat = "https://apiv2.sohu.com/api/topic/load?page_size=1&source_id=mp_%s";
    public static final String commentUrlFormat = "https://apiv2.sohu.com/api/comment/list?page_size=20&topic_id=%s&page_no=%s&source_id=mp_%s";
    public static final String viewsUrlFormat = "https://v2.sohu.com/public-api/articles/%s/pv";
    public static final String likesUrlFormat = "https://api.interaction.sohu.com/api/topic/like/count?source_id=mp_%s";

    // Site identifier this script accepts in crawlerCheck().
    private static final String scriptSite = "news";


    /**
     * Dispatches link extraction based on the URL of the finished request:
     * index page fans out channel list pages, list pages yield article pages,
     * article pages yield the comments-count API, and comment pages yield the
     * next comment page.
     *
     * @param crawlerRequestRecord the record whose download just completed
     * @param httpPage the downloaded page
     * @return follow-up request records; never null (empty when nothing matches)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        if (404 == httpPage.getStatusCode()){
            // A 404 page carries no data; drop it from the wash pipeline.
            logger.info("status code is 404");
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        if (doHttpPageCheck(crawlerRequestRecord,httpPage)){
            // Download failed or incomplete: re-queue the same request for retry.
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            parsedLinks.add(crawlerRequestRecord);
            return parsedLinks;
        }
        String lastRequestUrl = lastRequest.getUrl();
        if (lastRequestUrl.matches(indexUrlRegex)){
            // Index page: schedule one first-page list request per configured channel key.
            Map<String, Object> extras = lastRequest.getExtras();
            Map<String, String> keysMap = castMap(extras.get("keysMap"),String.class);
            Set<Map.Entry<String, String>> entries = keysMap.entrySet();
            for (Map.Entry<String, String> entry : entries) {
                String urlKey = entry.getValue();
                String keyword = entry.getKey();
                String listUrl = String.format(listPageUrlFormat,urlKey,1);
                CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(listUrl)
                        .recordKey(listUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .build();
                requestRecord.tagsCreator().bizTags().addKeywords(keyword);
                parsedLinks.add(requestRecord);
            }

            return parsedLinks;
        }
        if (lastRequestUrl.matches(listPageUrlRegex)){
            return parseListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(articleUrlRegex)){
            return parseArticleLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(commentsNumUrlRegex)){
            return parseCommentsNumLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(commentUrlRegex)){
            return parseCommentLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        // Unrecognized URL: return the empty list instead of null so callers
        // can iterate the result without a null check.
        return parsedLinks;
    }

    /**
     * Parses one comment-list API page and, when more pages remain, schedules
     * the next comment page for the same article/topic.
     *
     * @param crawlerRequestRecord record of the comment-list request just downloaded
     * @param httpPage comment-list JSON response
     * @param parsedLinks accumulator for follow-up requests
     * @return parsedLinks, with at most one next-page request appended
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        crawlerRequestRecord.setNeedWashPage(true);
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> extras = httpRequest.getExtras();
        try {
            Map<String, Object> urlParams = getUrlParams(httpRequestUrl);
            String pageNo = (String) urlParams.get("page_no");

            JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
            JSONObject jsonObject = pageObj.getJSONObject("jsonObject");
            int totalPageNo = jsonObject.getIntValue("total_page_no");

            int curPageNo = Integer.parseInt(pageNo);
            if (curPageNo < totalPageNo){
                // Advance the page numerically. The previous "pageNo += 1" on a
                // String concatenated instead of adding ("1" -> "11"), skipping pages.
                int nextPageNo = curPageNo + 1;
                String articleId = (String)extras.get("articleId");
                String topicId = (String)extras.get("topicId");
                String commentUrl = String.format(commentUrlFormat,topicId,nextPageNo,articleId);
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .recordKey(commentUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .resultLabelTag(comment)
                        .resultLabelTag(interaction)
                        .notFilterRecord()
                        .build();
                commentRecord.getHttpRequest().setExtras(copyExtras(extras));
                commentRecord.getHttpRequest().addHeader("User-Agent",getRandomUA());
                parsedLinks.add(commentRecord);
            }
        } catch (Exception e) {
            // Keep the cause in the log; a malformed page is skipped, not retried.
            logger.error("parse comment list page error", e);
            crawlerRequestRecord.setNeedWashPage(false);
        }

        return parsedLinks;
    }

    /**
     * Parses the comments-count API response: always schedules internal
     * downloads for views and likes, and — when the schedule asks for comments
     * and there are any — schedules the first comment-list page, carrying the
     * article's filter info over to the comment record.
     *
     * @param crawlerRequestRecord record of the comments-count request
     * @param httpPage comments-count JSON response
     * @param parsedLinks accumulator for follow-up requests
     * @return parsedLinks with views/likes (and optionally comment) requests appended
     */
    private List<CrawlerRequestRecord> parseCommentsNumLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        crawlerRequestRecord.setNeedWashPage(true);
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String articleId = (String)extras.get("articleId");
        try {
            JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
            JSONObject jsonObject = pageObj.getJSONObject("jsonObject");
            String comments = jsonObject.getString("cmt_sum");
            String topicId = jsonObject.getString("topic_id");
            // Stash the comment count so washPage can emit it with the interaction item.
            extras.put("comments",comments);
            // Internal downloads for views and likes.
            parsedLinks.add(getViewsRecord(crawlerRequestRecord,articleId));
            parsedLinks.add(getLikesRecord(crawlerRequestRecord,articleId));

            // Only generate a comment-list request when the schedule asked for comments.
            CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
            if (categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null) {
                if (Integer.parseInt(comments) == 0){
                    return parsedLinks;
                }
                if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
                    logger.error("sohu news crawler comment need to filter information!");
                    return parsedLinks;
                }
                KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
                CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(),CrawlerRecord.class);
                String commentUrl = String.format(commentUrlFormat, topicId, 1, articleId);
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .recordKey(commentUrl)
                        .releaseTime(System.currentTimeMillis())
                        .resultLabelTag(comment)
                        .resultLabelTag(interaction)
                        .notFilterRecord()
                        .copyBizTags()
                        .build();
                commentRecord.setFilter(filterInfoRecord.getFilter());
                commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
                commentRecord.getHttpRequest().addExtra("articleId",articleId);
                commentRecord.getHttpRequest().addExtra("topicId",topicId);
                commentRecord.getHttpRequest().addExtra("articleUrl", (String) extras.get("articleUrl"));
                commentRecord.getHttpRequest().addHeader("User-Agent",getRandomUA());
                parsedLinks.add(commentRecord);
            }
        } catch (Exception e) {
            // Keep the cause in the log; a malformed page is skipped, not retried.
            logger.error("parse comments num page error", e);
            crawlerRequestRecord.setNeedWashPage(false);
        }
        return parsedLinks;
    }

    /**
     * Builds an internal-download request for the page-view (pv) count of an article.
     *
     * @param crawlerRequestRecord parent record the new request derives from
     * @param articleId Sohu article id
     * @return a ready-to-download request tagged for internal download
     */
    private CrawlerRequestRecord getViewsRecord(CrawlerRequestRecord crawlerRequestRecord, String articleId) {
        String pvUrl = String.format(viewsUrlFormat, articleId);
        CrawlerRequestRecord pvRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(pvUrl)
                .recordKey(pvUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        // Randomize the UA per request to look less like a bot.
        pvRecord.getHttpRequest().addHeader("User-Agent", getRandomUA());
        return pvRecord;
    }

    /**
     * Builds an internal-download request for the like count of an article's topic.
     *
     * @param crawlerRequestRecord parent record the new request derives from
     * @param articleId Sohu article id
     * @return a ready-to-download request tagged for internal download
     */
    private CrawlerRequestRecord getLikesRecord(CrawlerRequestRecord crawlerRequestRecord, String articleId) {
        String likeCountUrl = String.format(likesUrlFormat, articleId);
        CrawlerRequestRecord likeRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(likeCountUrl)
                .recordKey(likeCountUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        // Randomize the UA per request to look less like a bot.
        likeRecord.getHttpRequest().addHeader("User-Agent", getRandomUA());
        return likeRecord;
    }

    /**
     * Handles a downloaded article page: derives the article/author ids from
     * the URL, then schedules the comments-count API request, attaching the
     * article HTML as an internal-download page so washPage can use it later.
     *
     * @param crawlerRequestRecord record of the article request
     * @param httpPage article HTML page
     * @param parsedLinks accumulator for follow-up requests
     * @return parsedLinks with the comments-count request appended
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        crawlerRequestRecord.setNeedWashPage(false);
        String articlePageUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        // Article URLs end with "<articleId>_<authorId>".
        String[] idParts = articlePageUrl.substring(articlePageUrl.lastIndexOf("/") + 1).split("_");
        String articleId = idParts[0];
        String authorId = idParts[1];
        Map<String,Object> extras = new HashMap<>();
        extras.put("articleId", articleId);
        extras.put("articleUrl", articlePageUrl);
        extras.put("authorId", authorId);
        // Outer-loop comments-count request.
        String commentsNumUrl = String.format(commentsNumUrlFormat, articleId);
        CrawlerRequestRecord commentsNumRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(commentsNumUrl)
                .recordKey(commentsNumUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()
                .needParsed(true)
                .needWashed(false)
                .build();
        commentsNumRecord.getHttpRequest().setExtras(extras);
        commentsNumRecord.getHttpRequest().addHeader("User-Agent", getRandomUA());
        // Carry the article HTML along for washing after internal downloads finish.
        commentsNumRecord.setInternalDownloadPage(httpPage);
        parsedLinks.add(commentsNumRecord);
        return parsedLinks;
    }

    /**
     * Parses a channel feed (list) page: schedules the next list page (capped
     * at page 50) and one article request per feed entry.
     *
     * @param crawlerRequestRecord record of the list request
     * @param httpPage feed JSON response (a top-level array)
     * @param parsedLinks accumulator for follow-up requests
     * @return parsedLinks with next-page and article requests appended
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> urlParams = getUrlParams(httpRequestUrl);
        if (null != urlParams){
            String sceneId = (String) urlParams.get("sceneId");
            String page = (String) urlParams.get("page");
            int curPn = Integer.parseInt(page);
            // Pagination is capped at 50 pages per channel.
            if (curPn < 50){
                curPn += 1;
                String nextPageUrl = String.format(listPageUrlFormat,sceneId,curPn);
                CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(nextPageUrl)
                        .recordKey(nextPageUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .build();
                nextPageRecord.getHttpRequest().addHeader("User-Agent",getRandomUA());
                parsedLinks.add(nextPageRecord);
            }
        }

        JSONArray objects = JSONObject.parseArray(httpPage.getRawText());
        for (Object object : objects) {
            JSONObject dataObj = (JSONObject)object;
            String articleId = dataObj.getString("id");
            String authorId = dataObj.getString("authorId");
            String articleUrl = String.format(articleUrlFormat,articleId,authorId);
            String pubTime = dataObj.getString("publicTime");
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(articleUrl)
                    .recordKey(articleUrl)
                    // Long.parseLong replaces the deprecated new Long(String) constructor.
                    .releaseTime(Long.parseLong(pubTime))
                    .copyBizTags()
                    .needParsed(true)
                    .needWashed(false)
                    .resultLabelTag(article)
                    .resultLabelTag(interaction)
                    .build();
            itemRecord.getHttpRequest().addHeader("User-Agent",getRandomUA());
            parsedLinks.add(itemRecord);
        }

        return parsedLinks;
    }

    /**
     * Collects the internally-downloaded views/likes responses into the parent
     * record's extras. If any internal download failed, washing is cancelled
     * and the parent request is re-queued.
     *
     * @param crawlerRecord parent record awaiting its internal downloads
     * @param internalDownloadRecords completed internal-download requests
     * @param links output list for re-queued requests
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        for (CrawlerRequestRecord downloaded : internalDownloadRecords) {
            HttpPage page = downloaded.getInternalDownloadPage();
            if (!page.isDownloadSuccess()) {
                // One failed internal download aborts washing and re-queues the parent.
                crawlerRecord.setNeedWashPage(false);
                crawlerRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                links.add(crawlerRecord);
                break;
            }
            String downloadedUrl = downloaded.getHttpRequest().getUrl();
            if (downloadedUrl.matches(viewsUrlRegex)) {
                // The pv endpoint returns the bare count as text; fall back to "0".
                try {
                    crawlerRecord.getHttpRequest().addExtra("views", page.getRawText().trim());
                } catch (Exception e) {
                    crawlerRecord.getHttpRequest().addExtra("views", "0");
                }
            }
            if (downloadedUrl.matches(likesUrlRegex)) {
                // The like endpoint returns JSON with data.topicLikeCount; fall back to "0".
                try {
                    JSONObject likeObj = JSONObject.parseObject(page.getRawText());
                    String likeCount = likeObj.getJSONObject("data").getString("topicLikeCount");
                    crawlerRecord.getHttpRequest().addExtra("likes", likeCount);
                } catch (Exception e) {
                    crawlerRecord.getHttpRequest().addExtra("likes", "0");
                }
            }
        }
    }

    /**
     * Washes a downloaded page into structured {@link CrawlerData} items.
     * <p>
     * For article-tagged records it extracts title/author/content/images from
     * the internally-downloaded article HTML and emits an interaction item
     * (comments/likes/views gathered earlier into the request extras). For
     * comment-tagged records it parses the comment-list JSON into one comment
     * item (and, when requested, one interaction item) per comment.
     *
     * @param crawlerRequestRecord the record whose result tags decide what to wash
     * @param httpPage the page body to wash
     * @return washed data items; empty when the page body is blank
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String articleId = (String) extras.get("articleId");
        String articleUrl = (String) extras.get("articleUrl");
        String rawText = httpPage.getRawText();
        if (StringUtils.isBlank(rawText)){
            logger.error("httpPage is empty !");
            return crawlerDataList;
        }
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)){
            // The article HTML rides along as the internal-download page; the
            // current httpPage is the comments-count API response.
            HttpPage internalDownloadPage = crawlerRequestRecord.getInternalDownloadPage();
            Html html = new Html(internalDownloadPage.getRawText());
            String authorId = (String) extras.get("authorId");
            String comments = (String) extras.get("comments");
            String likes = (String) extras.get("likes");
            String views = (String) extras.get("views");

            // Each xpath carries two alternatives covering Sohu's two article templates.
            List<String> titles = html.xpath("//div[@class=\"text-title\"]/h1//text()|//h3[@class=\"article-title\"]//text()").all();
            StringBuilder sbTitle = new StringBuilder();
            for (String title : titles) {
                sbTitle.append(title.trim());
            }
            String author = html.xpath("//div[@id=\"user-info\"]/h4/a/text()|//p[@class=\"author-name\"]/text()").get();

            List<String> contents = html.xpath("//article[@class=\"article\"]//p//text()|//article[@class=\"article-text\"]//p//text()").all();
            StringBuilder sbContent = new StringBuilder();
            for (String content : contents) {
                sbContent.append(content.trim());
            }
            List<String> images = html.xpath("//article[@class=\"article\"]//p//img/@src|//article[@class=\"article-text\"]//p//img/@src").all();
            StringBuilder sbImage = new StringBuilder();
            for (String image : images) {
                // "\x01" (literal backslash sequence) is the downstream multi-value separator.
                sbImage.append(image).append("\\x01");
            }

            CrawlerData crawlerAData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(articleUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .dataId(StringUtils.joinWith("-",domain(),site,article,articleId))
                    .addContentKV(Field_Title,unescapeHtml2J(sbTitle.toString()))
                    .addContentKV(Field_Author,author)
                    .addContentKV(Field_Author_Id,authorId)
                    .addContentKV(Field_Content,sbContent.toString())
                    .addContentKV(Field_Images,sbImage.toString())
                    .resultLabelTag(article)
                    .build();
            crawlerDataList.add(crawlerAData);

            if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
                // Interaction counters are snapshots, so they get the wash time.
                CrawlerData crawlerIData = CrawlerData.builder()
                        .data(crawlerRequestRecord,httpPage)
                        .url(articleUrl)
                        .releaseTime(System.currentTimeMillis())
                        .dataId(StringUtils.joinWith("-",domain(),site,interaction,articleId))
                        .parentId(StringUtils.joinWith("-",domain(),site,article,articleId))
                        .addContentKV(Field_I_Comments,comments)
                        .addContentKV(Field_I_Likes,likes)
                        .addContentKV(Field_I_Views,views)
                        .resultLabelTag(interaction)
                        .build();
                crawlerDataList.add(crawlerIData);
            }

        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)){
            JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
            JSONObject jsonObject = pageObj.getJSONObject("jsonObject");
            JSONArray commentObjs = jsonObject.getJSONArray("comments");
            for (Object object : commentObjs) {
                JSONObject commentObj = (JSONObject)object;
                String commentId = commentObj.getString("comment_id");
                String time = commentObj.getString("create_time");
                String content = commentObj.getString("content");
                String author = commentObj.getJSONObject("passport").getString("nickname");
                String authorId = commentObj.getString("user_id");
                String likes = commentObj.getString("support_count");
                String comments = commentObj.getString("reply_count");

                CrawlerData crawlerCData = CrawlerData.builder()
                        .data(crawlerRequestRecord,httpPage)
                        .url(articleUrl)
                        // Long.parseLong replaces the deprecated new Long(String) constructor.
                        .releaseTime(Long.parseLong(time))
                        .dataId(StringUtils.joinWith("-",domain(),site,comment,commentId))
                        .parentId(StringUtils.joinWith("-",domain(),site,article,articleId))
                        .addContentKV(Field_Content,content)
                        .addContentKV(Field_Author,author)
                        .addContentKV(Field_Author_Id,authorId)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .resultLabelTag(comment)
                        .build();
                crawlerDataList.add(crawlerCData);

                if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
                    // Per-comment interaction item parented to the comment.
                    CrawlerData crawlerIData = CrawlerData.builder()
                            .data(crawlerRequestRecord,httpPage)
                            .url(articleUrl)
                            .releaseTime(Long.parseLong(time))
                            .dataId(StringUtils.joinWith("-",domain(),site,interaction,commentId))
                            .parentId(StringUtils.joinWith("-",domain(),site,comment,commentId))
                            .addContentKV(Field_I_Likes,likes)
                            .addContentKV(Field_I_Comments,comments)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .resultLabelTag(interaction)
                            .build();
                    crawlerDataList.add(crawlerIData);
                }
            }
        }
        return crawlerDataList;
    }

    /**
     * Registers the URL patterns this script can parse. Views/likes URLs are
     * intentionally excluded: they are handled as internal downloads only.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {indexUrlRegex, listPageUrlRegex, articleUrlRegex, commentsNumUrlRegex, commentUrlRegex};
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Checks whether this script should handle the given record.
     *
     * @param crawlerRequestRecord candidate record
     * @return true when the record's "site" biz tag matches this script's site ("news")
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // Compare from the constant side so a record without a "site" tag
        // yields false instead of a NullPointerException.
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /**
     * No post-execution cleanup is needed for this script; intentionally empty.
     */
    @Override
    public void afterExecute(CrawlerRecordContext context) {

    }

    /**
     * @return the crawler domain identifier used in data ids for this script
     */
    @Override
    public String domain() {
        return "sohu";
    }

    /**
     * Checks whether the downloaded page is unusable: non-200 status, failed
     * download, or an empty body.
     *
     * @param crawlerRequestRecord record that produced the page
     * @param httpPage the page to verify
     * @return true when the page failed any check and must not be parsed
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String requestedUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        int code = httpPage.getStatusCode();
        if (code != 200) {
            logger.error("download page {} error, status code is {}", requestedUrl, code);
            return true;
        }
        if (!httpPage.isDownloadSuccess()) {
            logger.error("download page failed, check your link {}", requestedUrl);
            return true;
        }
        if (StringUtils.isBlank(httpPage.getRawText())) {
            logger.error("download page empty, check your link {}", requestedUrl);
            return true;
        }
        return false;
    }

    /**
     * Splits a URL's query string into a key/value map.
     *
     * @param url e.g. http://*.*.com?aa=11&bb=22&cc=33
     * @return map of query parameters, or null when the query part is blank
     */
    private Map<String, Object> getUrlParams(String url) {
        // Everything after the first '?' is the query; a URL without '?' is
        // treated as being all query (matches historical behavior).
        String query = url.contains("?") ? url.split("\\?")[1] : url;
        if (StringUtils.isBlank(query)) {
            return null;
        }
        Map<String, Object> paramMap = new HashMap<>();
        for (String pair : query.split("&")) {
            String[] kv = pair.split("=");
            // Malformed pairs (no '=' or extra '=') are silently skipped.
            if (kv.length == 2) {
                paramMap.put(kv[0], kv[1]);
            }
        }
        return paramMap;
    }

    /**
     * Returns a shallow copy of the given extras map (values are shared).
     *
     * @param inExtras source map; must not be null
     * @return a new HashMap containing the same entries
     */
    public static Map<String, Object> copyExtras(Map<String,Object> inExtras){
        // HashMap's copy constructor performs the same shallow entry copy as
        // iterating and putting each entry.
        return new HashMap<>(inExtras);
    }

    // Pool of desktop browser User-Agent strings used to randomize request headers.
    private static List<String> agentList = new ArrayList<>();

    // Populated once at class load; entries cover Chrome, IE, Opera and Firefox.
    static {
        agentList.add("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko");
        agentList.add("Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)");
        agentList.add("Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.28.3 (KHTML, like Gecko) Version/3.2.3 ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/525.28.3");
        agentList.add("Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16");
        agentList.add("Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14");
        agentList.add("Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14");
        agentList.add("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14");
        agentList.add("Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02");
        agentList.add("Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0");
        agentList.add("Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.13 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400");
    }

    /**
     * @return a User-Agent string chosen uniformly at random from agentList
     */
    private static String getRandomUA(){
        // RandomUtils.nextInt's upper bound is exclusive, so the bound must be
        // size() — the previous size() - 1 could never pick the last entry.
        return agentList.get(RandomUtils.nextInt(0, agentList.size()));
    }

    /**
     * Narrows an untyped object to a Map&lt;T,T&gt;, casting every key and value.
     *
     * @param obj candidate map (typically pulled from request extras)
     * @param clazz target class of both keys and values
     * @param <T> key/value type
     * @return a new map with cast entries, or null when obj is not a Map
     * @throws ClassCastException if any key or value is not an instance of clazz
     */
    public static <T> Map<T,T> castMap(Object obj, Class<T> clazz){
        if (!(obj instanceof Map<?,?>)) {
            return null;
        }
        Map<T,T> result = new HashMap<>();
        for (Map.Entry<?, ?> entry : ((Map<?, ?>) obj).entrySet()) {
            result.put(clazz.cast(entry.getKey()), clazz.cast(entry.getValue()));
        }
        return result;
    }

    /**
     * Repeatedly unescapes HTML entities (handles double/triple-escaped text,
     * e.g. "&amp;amp;lt;" -> "<"), capped at 6 passes.
     *
     * @param str possibly entity-escaped text; null is returned as-is
     * @return the fully unescaped text
     */
    public static String unescapeHtml2J(String str){
        if (str == null) {
            return null;
        }
        int times = 0;
        while (str.contains("&") && str.contains(";")){
            String unescaped = StringEscapeUtils.unescapeHtml(str);
            if (unescaped.equals(str)) {
                // Fixed point reached: remaining "&"/";" are literal characters,
                // not entities — previously this spun through 6 useless passes.
                break;
            }
            str = unescaped;
            times ++;
            if (times > 5){
                break;
            }
        }
        return str;
    }
}
