package com.chance.cc.crawler.development.scripts.ifeng.health;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.development.scripts.wangyi.health.WYHealthCrawlerScript;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @author lt
 * @version 1.0
 * @date 2021-02-09 11:16:40
 * @email okprog@sina.com
 */
public class IFengHealthCrawlerScript extends CrawlerCommonScript {

    // Fixed copy-paste bug: the logger was created for WYHealthCrawlerScript,
    // so every log line from this script was attributed to the wangyi script.
    // Also made static final per SLF4J convention.
    private static final Logger logger = LoggerFactory.getLogger(IFengHealthCrawlerScript.class);

    // Url patterns that classify each downloaded page in parseLinks().
    public static final String indexRegex = "https?://health\\.ifeng\\.com/";
    public static final String tagListRegex = "https?://health\\.ifeng\\.com/shanklist/12-\\S*";
    public static final String listUrlRegex = "https:?//shankapi\\.ifeng\\.com/shanklist/_/getColumnInfo/_/default/\\d*/\\d*/\\S*/getColumnInfoCallback";
    public static final String articleUrlRegex = "https:?//\\w*\\.ifeng\\.com/c/\\S*";
    public static final String commentsRegex = "https://comment\\.ifeng\\.com/get\\.php\\S*";
    public static final String sportsListUrlRegex = "https://shankapi.ifeng.com/season/sports/index/newStreamInitData_v2/getDynamicFragment\\S*";
    public static final String sportsIndexRegex = "https?://sports\\.ifeng\\.com/";

    // Format templates used to build list-feed and comment-api requests.
    public static final String sportsListUrlFormat = "https://shankapi.ifeng.com/season/sports/index/newStreamInitData_v2/getDynamicFragment/11-/%s/getSportsStreamInitData?page=%s";
    public static final String listUrlFormat = "https://shankapi.ifeng.com/shanklist/_/getColumnInfo/_/default/%s/%s/20/%s/getColumnInfoCallback";
    public static final String commentsFormat = "https://comment.ifeng.com/get.php?orderby=uptimes&docUrl=%s&format=json&job=1&p=%s&pageSize=10";

    /**
     * Routes a downloaded page to the parser matching its url, after handling
     * download failures and 404s.
     *
     * @param crawlerRequestRecord the record whose page was just downloaded
     * @param httpPage             the downloaded page
     * @return follow-up requests; empty when nothing should be scheduled
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        int statusCode = httpPage.getStatusCode();
        String lastRequestUrl = lastRequest.getUrl();
        // Blank body, failed download or unexpected status: re-queue the same
        // request for a retry, without washing, without the Host header and
        // without the dedup/filter tag so it is actually downloaded again.
        if (StringUtils.isBlank(httpPage.getRawText()) || !httpPage.isDownloadSuccess() || (statusCode != 200 && statusCode != 404)){
            parsedLinks.add(crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.getHttpRequest().getHeaders().remove("Host");
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            logger.error("{} --> this link has been reload status code is :{}",lastRequestUrl, statusCode);
            return parsedLinks;
        }
        // Gone pages yield no links at all.
        if (statusCode == 404){
            logger.error("{} --> this link is 404", lastRequestUrl);
            return parsedLinks;
        }
        if (lastRequestUrl.matches(indexRegex)){
            return parseIndexLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(sportsIndexRegex)){
            return parseSportsIndexLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(listUrlRegex) || lastRequestUrl.matches(tagListRegex)){
            return parseListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(sportsListUrlRegex)){
            return parseSportsListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(articleUrlRegex)){
            return parseArticleLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(commentsRegex)){
            return parseCommentLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        // Unmatched url: nothing to extract. Was "return null", which forced
        // callers to null-check; an empty list is what the 404 branch already
        // returns for the same "no follow-ups" meaning.
        return parsedLinks;
    }

    /**
     * Parses one page of the sports stream api (a jsonp payload): queues the
     * next page request plus one item request per article in the "data" array.
     */
    private List<CrawlerRequestRecord> parseSportsListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        Map<String, Object> urlParams = getUrlParams(httpRequestUrl);
        if (null != urlParams){
            int page = Integer.parseInt((String) urlParams.get("page"));
            page += 1;
            // NOTE(review): the format takes the page number twice (a path
            // segment and the "page" query param) — presumably both carry the
            // same page index; confirm against the live api.
            String nextPageUrl = String.format(sportsListUrlFormat,page,page);
            CrawlerRequestRecord listRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(listRecord);
        }

        // Strip the jsonp callback wrapper "cb(...)" to get the json payload.
        String rawText = httpPage.getRawText();
        String jsonStr = rawText.substring(rawText.indexOf("(") + 1, rawText.lastIndexOf(")"));
        JSONObject pageObj = JSONObject.parseObject(jsonStr);
        JSONArray items = pageObj.getJSONArray("data");
        for (Object item : items) {
            JSONObject itemObj = (JSONObject)item;
            String articleKey = itemObj.getString("id");
            String docUrl = itemObj.getString("commentUrl");
            String articleUrl = itemObj.getString("url");
            String pubTime = itemObj.getString("newsTime");
            // docUrl/articleKey travel with the item request so the article
            // washer can build the comment url and stable dataIds.
            Map<String,Object> extras = new HashMap<>();
            extras.put("docUrl",docUrl);
            extras.put("articleKey",articleKey);
            try {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(articleUrl)
                        .recordKey(articleUrl)
                        .releaseTime(DateUtils.parseDate(pubTime,"yyyy-MM-dd HH:mm:ss").getTime())
                        .copyBizTags()
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .build();
                itemRecord.getHttpRequest().setExtras(extras);
                parsedLinks.add(itemRecord);
            }catch (Exception e){
                // Keep the throwable: logger.error(e.getMessage()) dropped the
                // stack trace (and logs nothing useful when the message is null).
                logger.error(e.getMessage(), e);
            }
        }
        return parsedLinks;
    }

    /**
     * Seeds the sports stream with its first page; parseSportsListLinks()
     * takes over pagination from there.
     */
    private List<CrawlerRequestRecord> parseSportsIndexLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String firstPageUrl = String.format(sportsListUrlFormat, 1, 1);
        CrawlerRequestRecord firstPageRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(firstPageUrl)
                .recordKey(firstPageUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .build();
        parsedLinks.add(firstPageRecord);
        return parsedLinks;
    }

    /**
     * Handles one page of the comment api: when the page actually contains
     * comments it is marked washable (comment + interaction data types), and
     * while pages are still full (10 comments per page) the next page is queued.
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest request = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = request.getExtras();
        String docUrl = (String) extras.get("docUrl");
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        int count = pageObj.getIntValue("count");
        JSONArray comments = pageObj.getJSONArray("comments");
        // Guard clauses replace the original nested ifs; behavior is unchanged.
        Map<String, Object> urlParams = getUrlParams(request.getUrl());
        if (urlParams == null) {
            return parsedLinks;
        }
        int currentPage = Integer.parseInt((String) urlParams.get("p"));
        if (count <= 0 || comments.size() <= 0) {
            return parsedLinks;
        }
        crawlerRequestRecord.setNeedWashPage(true);
        crawlerRequestRecord.tagsCreator().resultTags().addResultDataType(comment);
        crawlerRequestRecord.tagsCreator().resultTags().addResultDataType(interaction);
        // A full page (pageSize=10) means there may be more comments.
        if (comments.size() == 10) {
            String nextUrl = String.format(commentsFormat, docUrl, (currentPage + 1));
            CrawlerRequestRecord nextRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl)
                    .recordKey(nextUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .build();
            nextRecord.getHttpRequest().setExtras(copyExtras(extras));
            parsedLinks.add(nextRecord);
        }
        return parsedLinks;
    }

    /**
     * From an article page: schedules an internal download of the comment api
     * (afterInternalDownload() copies the comment count into this article's
     * extras), plus washable comment-page requests for the health site and for
     * schedules whose tags request comment collection.
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String docUrl = (String) extras.get("docUrl");
        String commentUrl = String.format(commentsFormat, docUrl, 1);
        // Internal download: fetched inline purely to read the comment count.
        CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(commentUrl)
                .recordKey(commentUrl)
                .releaseTime(System.currentTimeMillis())
                .requestLabelTag(CrawlerEnum.CrawlerRequestType.internalDownload)
                .build();
        parsedLinks.add(interactionRecord);

        // Null-safe: the "site" kv-tag may be absent, and the original
        // getKVTagStrVal("site").equalsIgnoreCase("health") then threw NPE.
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if ("health".equalsIgnoreCase(site)){
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .recordKey(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .build();
            commentRecord.getHttpRequest().setExtras(copyExtras(extras));
            parsedLinks.add(commentRecord);
        }

        // Whether the schedule asks for comment collection.
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
        if (categoryTag.getLabelTag(CrawlerEnum.CrawlerDataType.comment.enumVal()) != null) {
            if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
                logger.error("ifeng news crawler comment need to filter information!");
                return parsedLinks;
            }
            // Filter configuration is serialized as a CrawlerRecord in a kv-tag;
            // copy its filter settings onto the comment request.
            KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
            CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .recordKey(commentUrl)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .copyBizTags()
                    .build();
            commentRecord.getHttpRequest().setExtras(copyExtras(extras));
            commentRecord.setFilter(filterInfoRecord.getFilter());
            commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
            parsedLinks.add(commentRecord);
        }
        return parsedLinks;
    }

    /**
     * Parses a column list page — either the html tag page (json embedded in a
     * "var allData = {...};" script) or the json list api — then queues the
     * next page (keyed by the oldest item's id and timestamp) and one item
     * request per news entry.
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        String httpRequestUrl = httpRequest.getUrl();
        String rawText = httpPage.getRawText();
        try {
            String nextPageUrl = "";
            JSONObject pageObj = null;
            if (httpRequestUrl.matches(tagListRegex)) {
                // Html tag page: extract the inline allData json (keeps the
                // last match, as before).
                Matcher mtData = Pattern.compile("var allData = (\\{.*\\});").matcher(rawText);
                while (mtData.find()) {
                    String allData = mtData.group(0).split("allData =")[1].trim();
                    allData = allData.substring(0,allData.lastIndexOf(";"));
                    pageObj = JSONObject.parseObject(allData);
                }
            }
            if (httpRequestUrl.matches(listUrlRegex)){
                // List api page: the body is already json.
                pageObj = JSONObject.parseObject(rawText);
            }
            if (null != pageObj){
                JSONArray newsStreams = pageObj.getJSONArray("newsstream");
                String columnId = pageObj.getString("columnId");
                // Paging cursor: last (oldest) entry's id and publish time.
                JSONObject lastData = newsStreams.getJSONObject(newsStreams.size() - 1);
                String lastDataTime = lastData.getString("newsTime");
                String lastDataTimeStr = String.valueOf(DateUtils.parseDate(lastDataTime, "yyyy-MM-dd HH:mm:ss").getTime());
                String lastDataId = lastData.getString("id");
                nextPageUrl = String.format(listUrlFormat, lastDataId, lastDataTimeStr, columnId);
                CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(nextPageUrl)
                        .recordKey(nextPageUrl)
                        .releaseTime(System.currentTimeMillis())
                        .copyBizTags()
                        .build();
                parsedLinks.add(nextPageRecord);
                for (Object newsStream : newsStreams) {
                    JSONObject newsObj = (JSONObject)newsStream;
                    String itemUrl = newsObj.getString("url");
                    String pubDate = newsObj.getString("newsTime");
                    Map<String,Object> extras = new HashMap<>();
                    String docUrl = newsObj.getString("commentUrl");
                    // Article key = last path segment of the article url.
                    String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1);
                    extras.put("docUrl",docUrl);
                    extras.put("articleKey",articleKey);
                    CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .httpUrl(itemUrl)
                            .recordKey(itemUrl)
                            .releaseTime(DateUtils.parseDate(pubDate,"yyyy-MM-dd HH:mm:ss").getTime())
                            .copyBizTags()
                            .resultLabelTag(article)
                            .resultLabelTag(interaction)
                            .build();
                    itemRecord.getHttpRequest().setExtras(extras);
                    parsedLinks.add(itemRecord);
                }
            }
        } catch (Exception e) {
            // Include the throwable: the original log line hid the actual cause.
            logger.error("{} ---> this tag link parse error",httpRequestUrl, e);
        }
        return parsedLinks;
    }

    /**
     * Parses the health channel index: reads the inline "var allData = {...};"
     * script and queues one tag-list request per top navigation entry that
     * matches the tag-list url pattern, tagging each with its channel path.
     */
    private List<CrawlerRequestRecord> parseIndexLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        String rawText = httpPage.getRawText();
        Matcher mtData = Pattern.compile("var allData = (\\{.*\\});").matcher(rawText);
        while (mtData.find()) {
            String allData = mtData.group(0).split("allData =")[1].trim();
            allData = allData.substring(0,allData.lastIndexOf(";"));
            JSONObject pageObj = JSONObject.parseObject(allData);
            JSONArray topNavs = pageObj.getJSONArray("topNav");
            for (Object allTag : topNavs) {
                JSONObject topNav = (JSONObject)allTag;
                String tagUrl = topNav.getString("url");
                // Nav urls are protocol-relative ("//health.ifeng.com/...").
                // Only prepend the scheme in that case: the unconditional
                // prefix turned an absolute url into "https:https://..." and
                // silently dropped it at the regex check below.
                if (tagUrl.startsWith("//")) {
                    tagUrl = "https:" + tagUrl;
                }
                if (!tagUrl.matches(tagListRegex)){
                    continue;
                }
                String tagName = topNav.getString("title");
                List<String> path = new ArrayList<>();
                path.add(tagName);
                CrawlerRequestRecord tagRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(tagUrl)
                        .recordKey(tagUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .build();
                tagRecord.tagsCreator().bizTags().addCustomKV(Field_Path,path);
                parsedLinks.add(tagRecord);
            }
        }
        return parsedLinks;
    }

    /**
     * Copies the comment count fetched by the internal comment-api download
     * into the article request's extras; on any parse failure the count falls
     * back to "0" (deliberate best-effort).
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        Map<String, Object> extras = crawlerRecord.getHttpRequest().getExtras();
        for (CrawlerRequestRecord downloadRecord : internalDownloadRecords) {
            String downloadUrl = downloadRecord.getHttpRequest().getUrl();
            if (!downloadUrl.matches(commentsRegex)) {
                continue;
            }
            try {
                String rawText = downloadRecord.getInternalDownloadPage().getRawText();
                extras.put("comments", JSONObject.parseObject(rawText).getString("count"));
            } catch (Exception e) {
                // Missing/unparsable comment payload just means zero comments.
                extras.put("comments", "0");
            }
        }
    }

    /**
     * Extracts structured data from a washed page. Depending on the record's
     * result tags this produces: the article (title/content/source/images) plus
     * its interaction record (comment count), and/or comment records with their
     * per-comment like-count interactions.
     *
     * @return extracted data, or null when the page body is blank
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String articleKey= (String) extras.get("articleKey");
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        if (StringUtils.isBlank(httpPage.getRawText())){
            return null;
        }
        // Wash the article page.
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.article)){
            Html html = httpPage.getHtml();
            String title = html.xpath("//h1/text()").get();
            String source = html.xpath("//span[contains(@class,\"source\")]/a/text()").get();
            if (StringUtils.isBlank(source)){
                source = "凤凰网";
            }
            String rawText = httpPage.getRawText();
            // The body div carries a generated class like "text-xxxx"; recover
            // it from the raw html (keeps the last match, as before).
            Matcher mtText = Pattern.compile("text-(.*?)\"").matcher(rawText);
            String divClass = "";
            while (mtText.find()){
                divClass = mtText.group(0).replace("\"","");
            }
            List<String> allContents = html.xpath("//div[@class=\""+divClass+"\"]/p//text()").all();
            // StringBuilder instead of StringBuffer: single-threaded use, no
            // synchronization needed.
            StringBuilder sbContent = new StringBuilder();
            for (String allContent : allContents) {
                sbContent.append(allContent);
            }
            List<String> allImages = html.xpath("//div[@class=\"" + divClass + "\"]/p//img/@src").all();
            StringBuilder sbImage = new StringBuilder();
            for (String allImage : allImages) {
                // Literal "\x01" text is the downstream image-url separator.
                sbImage.append(allImage).append("\\x01");
            }
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(httpRequest.getUrl())
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_Title,title)
                    .addContentKV(Field_Content,sbContent.toString())
                    .addContentKV(Field_Source,source)
                    .addContentKV(Field_Images,sbImage.toString())
                    .resultLabelTag(article)
                    .build();
            crawlerDataList.add(crawlerData);
            // Article-level interaction: the comment count gathered by the
            // internal download (see afterInternalDownload).
            if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.interaction)){
                String comments = (String) extras.get("comments");
                CrawlerData crawlerInteractionData = CrawlerData.builder()
                        .data(crawlerRequestRecord,httpPage)
                        .url(httpRequest.getUrl())
                        .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), articleKey))
                        .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                        .releaseTime(System.currentTimeMillis())
                        .addContentKV(Field_I_Comments,comments)
                        .resultLabelTag(interaction)
                        .build();
                crawlerDataList.add(crawlerInteractionData);
            }
        }
        // Wash a comment-api page: one comment record per entry, each parented
        // to the article, plus an interaction record (likes) per comment.
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(CrawlerEnum.CrawlerDataType.comment)){
            JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
            JSONArray allComments = pageObj.getJSONArray("comments");
            for (Object allComment : allComments) {
                JSONObject cmtObj = (JSONObject)allComment;
                String commentId = cmtObj.getString("comment_id");
                String author = cmtObj.getString("uname");
                String authorId = cmtObj.getString("user_id");
                String pubTime = cmtObj.getString("comment_date");
                String content = cmtObj.getString("comment_contents");
                try {
                    CrawlerData crawlerData = CrawlerData.builder()
                            .data(crawlerRequestRecord,httpPage)
                            .url(httpRequest.getUrl())
                            .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                            .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, article.enumVal(), articleKey))
                            .releaseTime(DateUtils.parseDate(pubTime,"yyyy/MM/dd HH:mm").getTime())
                            .addContentKV(Field_Content,content)
                            .addContentKV(Field_Author,author)
                            .addContentKV(Field_Author_Id,authorId)
                            .resultLabelTag(comment)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .build();
                    crawlerDataList.add(crawlerData);
                } catch (ParseException e) {
                    // Include the throwable so the offending date format shows up.
                    logger.error("parse date error", e);
                }
                if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
                    String likes = cmtObj.getString("uptimes");
                    CrawlerData crawlerInteractionData = CrawlerData.builder()
                            .data(crawlerRequestRecord,httpPage)
                            .url(httpRequest.getUrl())
                            .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, interaction.enumVal(), commentId))
                            .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), site, comment.enumVal(), commentId))
                            .releaseTime(System.currentTimeMillis())
                            .addContentKV(Field_I_Likes,likes)
                            .resultLabelTag(interaction)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .build();
                    crawlerDataList.add(crawlerInteractionData);
                }
            }
        }
        return crawlerDataList;
    }

    /**
     * Registers every url pattern this script claims, so that matching pages
     * are routed to this script by the framework.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {
                indexRegex,
                tagListRegex,
                listUrlRegex,
                articleUrlRegex,
                commentsRegex,
                sportsListUrlRegex,
                sportsIndexRegex,
        };
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Gatekeeper for this script: only records with no site tag at all, or the
     * "sports" site, are crawled here.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        String site = crawlerRecord.tagsCreator().bizTags().site();
        // Single boolean expression replaces the redundant
        // "if (...) return true; return false;" pattern. Behavior unchanged:
        // the isBlank() short-circuit keeps the equals() null-safe.
        return StringUtils.isBlank(site) || site.equals("sports");
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // Intentionally empty: this script needs no post-execution cleanup.
    }

    @Override
    public String domain() {
        // Domain key used in dataIds and for routing records to this script.
        return "ifeng";
    }


    /**
     * Shallow-copies the request extras so that paged comment requests do not
     * share one mutable map with the article request.
     *
     * @param inExtras extras to copy; must not be null (unchanged contract)
     * @return a new, independent map holding the same entries
     */
    public static Map<String, Object> copyExtras(Map<String,Object> inExtras){
        // HashMap's copy constructor replaces the hand-written entry loop.
        return new HashMap<>(inExtras);
    }
    /**
     * Splits the query string of {@code url} into a key/value map.
     *
     * @param url e.g. http://x.y.com?aa=11&bb=22&cc=33 (values are not url-decoded)
     * @return parameter map (pairs without "=" are skipped), or {@code null}
     *         when the url has no query string — callers rely on the null
     *         return to skip paging logic
     */
    public static Map<String, Object> getUrlParams(String url) {
        int queryStart = url.indexOf('?');
        // substring() instead of split("\\?")[1]: a trailing bare "?" used to
        // throw ArrayIndexOutOfBoundsException because split drops the empty
        // final element.
        String query = queryStart < 0 ? null : url.substring(queryStart + 1);
        if (query == null || query.trim().isEmpty()) {
            return null;
        }
        Map<String, Object> map = new HashMap<>();
        for (String pair : query.split("&")) {
            String[] kv = pair.split("=");
            if (kv.length == 2) {
                map.put(kv[0], kv[1]);
            }
        }
        return map;
    }
}
