package com.chance.cc.crawler.development.scripts.yidianzixun;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @author lt
 * @version 1.0
 * @date 2021-04-14 14:11:36
 * @email okprog@sina.com
 */
public class YDZXCrawlerScript extends CrawlerCommonScript {

    /** Shared SLF4J logger; static final per convention (one instance per class, not per object). */
    private static final Logger logger = LoggerFactory.getLogger(YDZXCrawlerScript.class);

    // Url patterns identifying the four page types this script can handle.
    public static final String indexUrlRegex = "https?://www\\.yidianzixun\\.com/";
    public static final String listPageUrlRegex = "https?://www\\.yidianzixun\\.com/home/q/news_list_for_channel\\S*";
    public static final String articleUrlRegex = "https?://www\\.yidianzixun\\.com/article/\\S*";
    public static final String commentUrlRegex = "https?://www\\.yidianzixun\\.com/home/q/getcomments\\S*";

    // String.format templates for building requests.
    // listPageUrlFormat: channel_id, cstart, cend, url-encoded _spt token, timestamp.
    public static final String listPageUrlFormat = "https://www.yidianzixun.com/home/q/news_list_for_channel?channel_id=%s&cstart=%s&cend=%s&infinite=true&refresh=1&__from__=wap&_spt=%s&appid=web_yidian&_=%s";
    // listUrlSimpleFormat: same request without _spt; used as input when computing the token.
    public static final String listUrlSimpleFormat = "/home/q/news_list_for_channel?channel_id=%s&cstart=%s&cend=%s&infinite=true&refresh=1&__from__=wap&appid=web_yidian&_=%s";
    public static final String articleUrlFormat = "https://www.yidianzixun.com/article/%s";
    // commentUrlFormat: docid, last_comment_id (empty for the first page), timestamp.
    public static final String commentUrlFormat = "https://www.yidianzixun.com/home/q/getcomments?docid=%s&count=30&last_comment_id=%s&appid=web_yidian&_=%s";

    /** Only records whose biz "site" tag matches this value are handled (see crawlerCheck). */
    private static final String scriptSite = "news";

    /**
     * Routes a downloaded page to the matching link parser based on its url.
     * 404 pages are dropped; broken downloads are re-queued for retry.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return follow-up requests; never {@code null} (the original returned null
     *         for unmatched urls, forcing null checks on every caller)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        if (404 == httpPage.getStatusCode()){
            // page is gone; nothing to wash, nothing to follow
            logger.info("status code is 404");
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        if (doHttpPageCheck(crawlerRequestRecord,httpPage)){
            // broken/incomplete download: re-queue the record itself for a retry
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            parsedLinks.add(crawlerRequestRecord);
            return parsedLinks;
        }
        String lastRequestUrl = lastRequest.getUrl();
        if (lastRequestUrl.matches(indexUrlRegex)){
            return parseIndexLinks(crawlerRequestRecord, parsedLinks, lastRequest);
        }
        if (lastRequestUrl.matches(listPageUrlRegex)){
            return parseListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(articleUrlRegex)){
            return parseArticleLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(commentUrlRegex)){
            return parseCommentLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        // no pattern matched: return the (empty) list instead of null so callers can iterate safely
        logger.warn("unhandled url {}", lastRequestUrl);
        return parsedLinks;
    }

    /**
     * Handles a comment-list json page: when the current page looks full
     * (&gt;= 20 of the up-to-30 requested comments), schedules the next page,
     * paginating with the id of the last comment seen.
     *
     * @param crawlerRequestRecord record whose response is a comment json page
     * @param httpPage             downloaded json page
     * @param parsedLinks          accumulator for follow-up requests
     * @return parsedLinks, possibly extended with the next comment-page request
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        JSONArray comments = pageObj.getJSONArray("comments");
        if (comments != null && !comments.isEmpty()){
            crawlerRequestRecord.setNeedWashPage(true);
            if (comments.size() >= 20){
                String articleKey = (String) extras.get("articleKey");
                // paginate from the last comment of this page
                JSONObject cmtObj = comments.getJSONObject(comments.size() - 1);
                String commentId = cmtObj.getString("comment_id");
                String commentUrl = String.format(commentUrlFormat,articleKey,commentId,System.currentTimeMillis());
                CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(commentUrl)
                        .recordKey(commentUrl)
                        .releaseTime(System.currentTimeMillis())
                        .resultLabelTag(comment)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .build();
                // carry articleKey/articleUrl forward so washPage can build dataIds
                commentRecord.getHttpRequest().setExtras(extras);
                parsedLinks.add(commentRecord);
            }
        }
        return parsedLinks;
    }

    /**
     * Handles an article page: always schedules an internal-download request for the
     * first comment page (used by afterInternalDownload to read the comment total),
     * and — when the schedule tags ask for comments — a regular turn-page comment
     * request that inherits the filter information stored in the
     * "comment_record_filter_info" biz tag.
     *
     * @param crawlerRequestRecord record whose response is an article html page
     * @param httpPage             downloaded article page
     * @param parsedLinks          accumulator for follow-up requests
     * @return parsedLinks, extended with comment requests when applicable
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String rawText = httpPage.getRawText();
        if (rawText.contains("文章没有找到哦")){
            // site's "article not found" marker: skip washing entirely
            logger.info("article is deleted");
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        crawlerRequestRecord.setNeedWashPage(true);
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        // article key is the last path segment of /article/<key>
        String articleKey = httpRequestUrl.substring(httpRequestUrl.lastIndexOf("/") + 1);
        httpRequest.addExtra("articleKey",articleKey);
        httpRequest.addExtra("articleUrl",httpRequestUrl);

        // first comment page (empty last_comment_id), fetched in-line so washPage
        // can record the comment total for the article's interaction data
        String commentUrl = String.format(commentUrlFormat,articleKey,"",System.currentTimeMillis());
        CrawlerRequestRecord commentsRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .recordKey(commentUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        parsedLinks.add(commentsRecord);

        // decide whether comments should also be crawled as standalone records
        if (categoryTagHasCommentType(crawlerRequestRecord)) {
            if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
                logger.error("yidianzixun news crawler comment need to filter information!");
                return parsedLinks;
            }
            KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
            // filter info is serialized as a CrawlerRecord json in the tag value
            CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(),CrawlerRecord.class);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .recordKey(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .resultLabelTag(comment)
                    .resultLabelTag(interaction)
                    .notFilterRecord()
                    .copyBizTags()
                    .build();
            commentRecord.setFilter(filterInfoRecord.getFilter());
            commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
            commentRecord.getHttpRequest().addExtra("articleKey",articleKey);
            commentRecord.getHttpRequest().addExtra("articleUrl",httpRequestUrl);
            parsedLinks.add(commentRecord);
        }
        return parsedLinks;
    }

    /** True when the schedule category tag asks for comment data. */
    private boolean categoryTagHasCommentType(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
        return categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null;
    }

    /**
     * Handles a channel list json page: schedules the next page of the same
     * channel (window advanced by 10) plus one article request per list item.
     *
     * @param crawlerRequestRecord record whose response is a list json page
     * @param httpPage             downloaded json page
     * @param parsedLinks          accumulator for follow-up requests
     * @return parsedLinks extended with the next-page and article requests
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> urlParams = getUrlParams(httpRequestUrl);
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        JSONArray resultObj = pageObj.getJSONArray("result");
        if (null == resultObj){
            logger.error("parse page json error");
            return parsedLinks;
        }
        if (null != urlParams && resultObj.size() > 0){
            // advance the paging window by 10 items
            int cstart = Integer.parseInt((String) urlParams.get("cstart")) + 10;
            int cend = Integer.parseInt((String) urlParams.get("cend")) + 10;
            String channelId = (String) urlParams.get("channel_id");
            long currentTimeMillis = System.currentTimeMillis();
            String paramUrl = String.format(listUrlSimpleFormat, channelId, cstart, cend, currentTimeMillis);
            String[] args = {paramUrl, channelId, String.valueOf(cstart), String.valueOf(cend)};
            String spt = getSpt(args);
            String encodedSpt;
            try {
                // explicit charset: the single-arg URLEncoder.encode is deprecated and platform-dependent
                encodedSpt = URLEncoder.encode(spt, StandardCharsets.UTF_8.name());
            } catch (UnsupportedEncodingException e) {
                // UTF-8 is mandated by the JVM spec, so this branch is unreachable
                throw new IllegalStateException("UTF-8 not supported", e);
            }
            String listUrl = String.format(listPageUrlFormat,channelId,cstart,cend,encodedSpt,currentTimeMillis);
            CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(listUrl)
                    .recordKey(listUrl)
                    .releaseTime(currentTimeMillis)
                    .copyBizTags()
                    .build();
            // reuse the headers (UA / cookie / referer) the channel request was seeded with
            listRecord.getHttpRequest().setHeaders(httpRequest.getHeaders());
            parsedLinks.add(listRecord);

            for (Object o : resultObj) {
                JSONObject dataObj = (JSONObject)o;
                String articleKey = dataObj.getString("itemid");
                String pubTime = dataObj.getString("date");
                String articleUrl = String.format(articleUrlFormat,articleKey);
                try {
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(articleUrl)
                            .recordKey(articleUrl)
                            .releaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime())
                            .copyBizTags()
                            .resultLabelTag(article)
                            .resultLabelTag(interaction)
                            .needParsed(true)
                            .build();
                    parsedLinks.add(itemRecord);
                } catch (Exception e) {
                    // keep the stack trace; one bad item must not abort the whole page
                    logger.error("build article record failed for {}", articleUrl, e);
                }
            }
        }
        return parsedLinks;
    }

    /**
     * Handles the site index page: builds one signed channel-list request per
     * channel found in the "keysMap" extra (channelName -&gt; channelId).
     *
     * @param crawlerRequestRecord record for the index page
     * @param parsedLinks          accumulator for follow-up requests
     * @param lastRequest          request that produced this page; carries the keysMap extra
     * @return parsedLinks extended with one list request per channel
     */
    private List<CrawlerRequestRecord> parseIndexLinks(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> parsedLinks, HttpRequest lastRequest) {
        Map<String, Object> extras = lastRequest.getExtras();
        Map<String, String> keysMap = castMap(extras.get("keysMap"),String.class);
        if (keysMap == null) {
            // castMap returns null when the extra is absent or not a map; the original NPE'd here
            logger.error("keysMap extra missing, cannot build channel list requests");
            return parsedLinks;
        }
        for (Map.Entry<String, String> entry : keysMap.entrySet()) {
            String channelId = entry.getValue();
            String channelName = entry.getKey();
            // first page always covers items [0, 10)
            String cStart = "0";
            String cEnd = "10";
            long currentTimeMillis = System.currentTimeMillis();
            String paramUrl = String.format(listUrlSimpleFormat, channelId, cStart, cEnd, currentTimeMillis);
            String[] args = {paramUrl, channelId, cStart, cEnd};
            String spt = getSpt(args);
            String encodedSpt;
            try {
                // explicit charset: the single-arg URLEncoder.encode is deprecated and platform-dependent
                encodedSpt = URLEncoder.encode(spt, StandardCharsets.UTF_8.name());
            } catch (UnsupportedEncodingException e) {
                throw new IllegalStateException("UTF-8 not supported", e);
            }
            String listUrl = String.format(listPageUrlFormat,channelId,cStart,cEnd,encodedSpt,currentTimeMillis);
            CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(listUrl)
                    .recordKey(listUrl)
                    .releaseTime(currentTimeMillis)
                    .notFilterRecord()
                    .copyBizTags()
                    .build();
            requestRecord.tagsCreator().bizTags().addCustomKV(Tag_Site_Info,channelName);
            requestRecord.getHttpRequest().addHeader("Referer","http://www.yidianzixun.com/");
            requestRecord.getHttpRequest().addHeader("User-Agent","Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1");
            requestRecord.getHttpRequest().addHeader("X-Requested-With","XMLHttpRequest");
            // NOTE(review): hard-coded session cookie will expire; should be supplied by config or a login step
            requestRecord.getHttpRequest().addHeader("Cookie","wuid=871443882239128; wuid_createAt=2021-02-03 14:05:54; JSESSIONID=103956b431f933f711909e7081f8b7ddb5e31c5fb242e663049edb4c2e7b25dd; aliyungf_tc=b03cba3f902f4d5c6cdee4ec1149903f15ce11d2710b4a252ac1ce9e9d9b4fb0; Hm_lvt_15fafbae2b9b11d280c79eff3b840e45=1618219768,1618285348,1618380745,1618382455; captcha=s%3Aa67b147ee1d06ba53baa5ced1cfa9963.PJPQSYZpiDUPjV8KUVZe%2FYpjSRTwFoTnt9%2FShLlSlOo; Hm_lpvt_15fafbae2b9b11d280c79eff3b840e45=1618382553");
            parsedLinks.add(requestRecord);
        }
        return parsedLinks;
    }


    /**
     * Post-processes internal (in-line) downloads. If any internal download
     * failed, the parent record is re-queued for retry and processing stops;
     * otherwise the comment total is copied from the comment json page into
     * the parent record's "comments" extra.
     *
     * @param crawlerRecord           the parent record being processed
     * @param internalDownloadRecords the internal download requests and their pages
     * @param links                   output list for re-scheduled records
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        for (CrawlerRequestRecord record : internalDownloadRecords) {
            HttpPage page = record.getInternalDownloadPage();
            if (!page.isDownloadSuccess()) {
                // re-queue the parent record for another round and stop processing
                crawlerRecord.setNeedWashPage(false);
                crawlerRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
                links.add(crawlerRecord);
                break;
            }
            String url = record.getHttpRequest().getUrl();
            if (!url.matches(commentUrlRegex)) {
                continue;
            }
            try {
                JSONObject body = JSONObject.parseObject(page.getRawText());
                crawlerRecord.getHttpRequest().addExtra("comments", body.getString("total"));
            } catch (Exception e) {
                // malformed json: fall back to a zero count rather than failing the record
                crawlerRecord.getHttpRequest().addExtra("comments", "0");
            }
        }
    }

    /**
     * Extracts structured {@link CrawlerData} items from a downloaded page:
     * for article pages, the article body plus (optionally) an interaction
     * record holding the comment total; for comment json pages, one record per
     * comment plus (optionally) a per-comment like-count interaction record.
     *
     * @param crawlerRequestRecord record carrying result tags and the
     *                             articleKey/articleUrl/comments extras
     * @param httpPage             downloaded page (html for articles, json for comments)
     * @return extracted data items; empty when the page body is blank
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String articleKey = (String) extras.get("articleKey");
        String articleUrl = (String) extras.get("articleUrl");
        String rawText = httpPage.getRawText();
        if (StringUtils.isBlank(rawText)){
            logger.error("httpPage is empty !");
            return crawlerDataList;
        }
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)){
            Html html = httpPage.getHtml();
            String title = html.xpath("//h2/text()").get();
            String author = html.xpath("//a[@class=\"wemedia-name\"]/text()").get();
            String authorId = html.xpath("//a[@class=\"wemedia-subscribe\"]/@data-id").get();
            List<String> contents = html.xpath("//div[@class=\"content-bd\"]//p//text()").all();
            List<String> images = html.xpath("//div[@class=\"content-bd\"]//img/@src").all();
            // StringBuilder over StringBuffer: local accumulators need no synchronization
            StringBuilder sbContent = new StringBuilder();
            for (String content : contents) {
                sbContent.append(content);
            }
            // every image url is terminated with the literal \x01 separator, last one included
            StringBuilder sbImage = new StringBuilder();
            for (String image : images) {
                sbImage.append(image).append("\\x01");
            }
            String comments = (String) extras.get("comments");
            CrawlerData crawlerAData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(articleUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .dataId(StringUtils.joinWith("-",domain(),site,article,articleKey))
                    .addContentKV(Field_Title,title)
                    .addContentKV(Field_Author,author)
                    .addContentKV(Field_Author_Id,authorId)
                    .addContentKV(Field_Content,sbContent.toString())
                    .addContentKV(Field_Images,sbImage.toString())
                    .resultLabelTag(article)
                    .build();
            crawlerDataList.add(crawlerAData);

            if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
                // article-level interaction record: comment total gathered by afterInternalDownload
                CrawlerData crawlerIData = CrawlerData.builder()
                        .data(crawlerRequestRecord,httpPage)
                        .url(articleUrl)
                        .releaseTime(System.currentTimeMillis())
                        .dataId(StringUtils.joinWith("-",domain(),site,interaction,articleKey))
                        .parentId(StringUtils.joinWith("-",domain(),site,article,articleKey))
                        .addContentKV(Field_I_Comments,comments)
                        .resultLabelTag(interaction)
                        .build();
                crawlerDataList.add(crawlerIData);
            }

        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)){
            JSONObject pageObj = JSONObject.parseObject(rawText);
            JSONArray allComments = pageObj.getJSONArray("comments");
            // guard against a missing "comments" array (original would NPE here)
            if (allComments != null && !allComments.isEmpty()){
                for (Object allComment : allComments) {
                    JSONObject cmtObj = (JSONObject)allComment;
                    String commentId = cmtObj.getString("comment_id");
                    String content = cmtObj.getString("comment");
                    String author = cmtObj.getString("nickname");
                    String authorId = cmtObj.getString("userid");
                    String likes = cmtObj.getString("like");
                    likes = StringUtils.isBlank(likes) ? "0" : likes;
                    String pubTime = cmtObj.getString("createAt");
                    long releaseTime = 0;
                    try {
                        releaseTime = DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime();
                    }catch (Exception e){
                        // keep the stack trace; skip comments with unparsable timestamps
                        logger.error("unparsable comment time {}", pubTime, e);
                        continue;
                    }

                    CrawlerData crawlerCData = CrawlerData.builder()
                            .data(crawlerRequestRecord,httpPage)
                            .url(articleUrl)
                            .releaseTime(releaseTime)
                            .dataId(StringUtils.joinWith("-",domain(),site,comment,commentId))
                            .parentId(StringUtils.joinWith("-",domain(),site,article,articleKey))
                            .addContentKV(Field_Content,content)
                            .addContentKV(Field_Author,author)
                            .addContentKV(Field_Author_Id,authorId)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .resultLabelTag(comment)
                            .build();
                    crawlerDataList.add(crawlerCData);

                    if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
                        // comment-level interaction record: like count, parented to the comment
                        CrawlerData crawlerIData = CrawlerData.builder()
                                .data(crawlerRequestRecord,httpPage)
                                .url(articleUrl)
                                .releaseTime(releaseTime)
                                .dataId(StringUtils.joinWith("-",domain(),site,interaction,commentId))
                                .parentId(StringUtils.joinWith("-",domain(),site,comment,commentId))
                                .addContentKV(Field_I_Likes,likes)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                                .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                                .resultLabelTag(interaction)
                                .build();
                        crawlerDataList.add(crawlerIData);
                    }
                }
            }

        }
        return crawlerDataList;
    }

    /** Registers every url pattern this script can handle with the framework. */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {indexUrlRegex, listPageUrlRegex, articleUrlRegex, commentUrlRegex};
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Checks whether this script should handle the given record.
     *
     * @param crawlerRequestRecord candidate record
     * @return true only when the record's biz "site" tag equals {@link #scriptSite}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // constant-first comparison: returns false instead of NPE when the tag is absent
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /** Intentionally empty: this script needs no post-processing after a crawl round. */
    @Override
    public void afterExecute(CrawlerRecordContext context) {

    }

    /**
     * @return the crawler domain identifier; used as the prefix of every dataId
     *         built in {@code washPage}
     */
    @Override
    public String domain() {
        return "yidianzixun";
    }

    /**
     * Checks whether a download is broken or incomplete.
     *
     * @param crawlerRequestRecord the record that produced the page
     * @param httpPage             downloaded page to inspect
     * @return {@code true} when the page should be retried (non-200 status,
     *         failed download, or empty body); {@code false} when it is usable
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String url = crawlerRequestRecord.getHttpRequest().getUrl();
        int code = httpPage.getStatusCode();
        if (code != 200) {
            logger.error("download page {} error, status code is {}", url, code);
            return true;
        }
        if (!httpPage.isDownloadSuccess()) {
            logger.error("download page failed, check your link {}", url);
            return true;
        }
        if (StringUtils.isBlank(httpPage.getRawText())) {
            logger.error("download page empty, check your link {}", url);
            return true;
        }
        return false;
    }

    /**
     * Parses the query string of a url into a key/value map.
     *
     * @param url e.g. {@code http://x.y.com?aa=11&bb=22&cc=33}
     * @return parameter map, or {@code null} when the url carries no query string
     */
    private Map<String, Object> getUrlParams(String url) {
        String query = url;
        if (url.contains("?")) {
            // limit 2: keep everything after the FIRST '?', and avoid the
            // ArrayIndexOutOfBounds the original hit on urls ending with '?'
            query = url.split("\\?", 2)[1];
        }
        if (StringUtils.isBlank(query)) {
            return null;
        }
        Map<String, Object> map = new HashMap<>();
        for (String pair : query.split("&")) {
            // limit 2 so values containing '=' (tokens, base64) are kept intact;
            // the original split dropped such parameters entirely
            String[] kv = pair.split("=", 2);
            if (kv.length == 2) {
                map.put(kv[0], kv[1]);
            }
        }
        return map;
    }

    /**
     * Returns a fresh, independent shallow copy of the given extras map.
     *
     * @param inExtras source map; not modified
     * @return a new HashMap containing the same entries
     */
    public static Map<String, Object> copyExtras(Map<String,Object> inExtras){
        // HashMap's copy constructor performs the same entry-by-entry shallow copy
        return new HashMap<>(inExtras);
    }

    // Pool of desktop user-agent strings for getRandomUA (anti-crawl rotation).
    private static List<String> agentList = new ArrayList<>();

    static {
        agentList.add("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko");
        agentList.add("Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)");
        agentList.add("Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)");
        agentList.add("Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2");
        agentList.add("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.28.3 (KHTML, like Gecko) Version/3.2.3 ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/525.28.3");
        agentList.add("Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16");
        agentList.add("Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14");
        agentList.add("Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14");
        agentList.add("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14");
        agentList.add("Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02");
        agentList.add("Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1");
        agentList.add("Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0");
        agentList.add("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0");
        agentList.add("Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.13 Safari/537.36");
        agentList.add("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400");
    }

    /**
     * @return a user-agent picked uniformly at random from {@code agentList}
     */
    private static String getRandomUA(){
        // RandomUtils.nextInt(start, end) is end-EXCLUSIVE: the original
        // "size() - 1" bound could never return the last list entry
        return agentList.get(RandomUtils.nextInt(0, agentList.size()));
    }

    /**
     * Safely converts an untyped object to a map whose keys and values are both
     * of the given class (typically used on values pulled from request extras).
     *
     * @param obj   candidate map object
     * @param clazz target key/value type
     * @return a new HashMap with cast entries, or {@code null} when obj is not a Map
     * @throws ClassCastException if any key or value is not an instance of clazz
     */
    public static <T> Map<T,T> castMap(Object obj, Class<T> clazz){
        if (!(obj instanceof Map<?,?>)){
            return null;
        }
        Map<T,T> result = new HashMap<>();
        for (Map.Entry<?, ?> entry : ((Map<?, ?>) obj).entrySet()) {
            result.put(clazz.cast(entry.getKey()), clazz.cast(entry.getValue()));
        }
        return result;
    }

    /**
     * Repeatedly html-unescapes a string (handles double/triple-escaped entities
     * like {@code &amp;amp;lt;}), capped at 6 rounds to guard against pathological input.
     *
     * @param str possibly (multiply) html-escaped text
     * @return fully unescaped text; unchanged when it contains no entities
     */
    public static String unescapeHtml2J(String str){
        int times = 0;
        while (str.contains("&") && str.contains(";")){
            String unescaped = StringEscapeUtils.unescapeHtml(str);
            if (unescaped.equals(str)) {
                // nothing changed: the remaining '&'/';' are literal text, stop early
                // (the original kept looping to the cap doing redundant work)
                break;
            }
            str = unescaped;
            times ++;
            if (times > 5){
                break;
            }
        }
        return str;
    }

    /**
     * Computes the "_spt" anti-crawl token: concatenates "sptoken" with
     * args[1..] and XORs every character with 10, mirroring the site's
     * javascript. Note: args[0] (the raw param url) is deliberately skipped.
     *
     * @param args {paramUrl, channelId, cstart, cend}
     * @return obfuscated token string (url-encode before use)
     */
    public static String getSpt(String[] args){
        StringBuilder seed = new StringBuilder("sptoken");
        for (int i = 1; i < args.length; i++) {
            seed.append(args[i]);
        }
        StringBuilder token = new StringBuilder(seed.length());
        for (int i = 0; i < seed.length(); i++) {
            // '^' is XOR (as in the original js), not exponentiation; the cast
            // can never throw, so the original try/catch + debug print was dead code
            token.append((char) (10 ^ seed.charAt(i)));
        }
        return token.toString();
    }

    /**
     * Converts a character code (expected to fit in a {@code char}) to a
     * one-character string.
     *
     * @param code character code
     * @return single-character string
     */
    public static String unicode2String(int code){
        char ch = (char) code;
        return Character.toString(ch);
    }


    /**
     * Smoke test for {@link #getSpt(String[])}: the token must be non-empty,
     * deterministic, and independent of args[0].
     */
    @Test
    public void testSpt(){
        String url = "/home/q/news_list_for_channel?channel_id=11678397984&cstart=10&cend=20&infinite=true&refresh=1&__from__=wap&appid=web_yidian&_=1618382556971";
        String spt = getSpt(new String[]{url, "11678397984", "10", "20"});
        org.junit.Assert.assertNotNull(spt);
        org.junit.Assert.assertFalse(spt.isEmpty());
        // the token only depends on args[1..]; args[0] must not influence it
        org.junit.Assert.assertEquals(spt, getSpt(new String[]{"other", "11678397984", "10", "20"}));
    }


}
