package com.chance.cc.crawler.development.scripts.gome;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import com.google.gson.JsonObject;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.ParseException;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.comment;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

public class GomeCrawlerScript extends CrawlerCommonScript {
    private static final String DOMAIN = "gome";
    private static final String SITE = "commodity";
    private static final String START_URL = "https://search.gome.com.cn/search\\?question=\\S*";
    private static final Logger logger = LoggerFactory.getLogger(GomeCrawlerScript.class);
    //Referer header value required by the next-page request
    private static final String REFERER_FORMAT = "https://search.gome.com.cn/search?question=%s&searchType=goods&search_mode=normal&reWrite=true&instock=1";
    //Regex matching product detail pages
    private static final String DETAIL_REGEX = "https://item.gome.com.cn/\\S*-\\S*.html";
    //Regex matching per-product price endpoints
    private static final String GET_PRICE_REGEX = "https://ss.gome.com.cn/item/v1/d/m/store/unite/\\S*";
    //Regex matching next-page (paginated search) URLs
    private static final String PAGENEXT_REGEX = "https://search.gome.com.cn/search\\?search_mode=normal&reWrite=true&question=\\S*&searchType=goods&instock=1&facets=&page=\\d+&bws=0&type=json&reWrite=true&rank=1";
    //Regex matching comment-feed URLs
    private static final String COMMENT_REGEX = "https://ss.gome.com.cn/item/v1/prdevajsonp/appraiseNew/\\S*";
    //Format string for building next-page URLs
    private static final String NEXTPAGE_FORMAT = "https://search.gome.com.cn/search?search_mode=normal&reWrite=true&question=%s&searchType=goods&instock=1&facets=&page=%s&bws=0&type=json&reWrite=true&rank=1";
    //Format string for building per-product price URLs
    private static final String PRICE_FORMAT = "https://ss.gome.com.cn/item/v1/d/m/store/unite/G001/fshop/%s/%s/21010000/N/21010800/210108008/null/1/flag/item/allStores7135?_=%s";
    private static final String PRICE2_FORMAT = "https://ss.gome.com.cn/item/v1/d/m/store/unite/80020766/pop/%s/%s/21010000/N/21010800/210108008/null/1/flag/item/allStores84?&_=%s";

    /**
     * Identifies this script as the handler for the "gome" domain.
     */
    @Override
    public String domain() {
        return GomeCrawlerScript.DOMAIN;
    }

    /**
     * Registers every URL pattern this crawler may follow: search entry,
     * detail page, price lookup, pagination and comment feed.
     */
    @Override
    public void initUrlRegulars() {
        String[] regulars = {START_URL, DETAIL_REGEX, GET_PRICE_REGEX, PAGENEXT_REGEX, COMMENT_REGEX};
        for (String regular : regulars) {
            addUrlRegular(regular);
        }
    }

    /**
     * Accepts only records whose business category tag marks them as the
     * "commodity" site; other sites belong to different scripts.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        // Constant on the left: a missing "site" tag now yields false instead
        // of a NullPointerException.
        return SITE.equalsIgnoreCase(crawlerSite);
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // Intentionally empty: this script needs no post-execution work.
    }

    /**
     * Seeds the crawl: each support record whose URL contains "keys" carries a
     * keyword list that is expanded into concrete search requests.
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> supportRecords) {
        List<CrawlerRecord> records = new ArrayList<>();
        for (CrawlerRequestRecord support : supportRecords) {
            boolean isKeywordSource = support.getHttpRequest().getUrl().contains("keys");
            if (isKeywordSource) {
                initKeyWords(crawlerRequestRecord, support, records);
            }
        }
        return records;
    }

    /**
     * Expands a downloaded keyword-list page into one search request per
     * keyword and appends them to {@code crawlerRecords}.
     *
     * <p>Requires the "searchKvSourceUrl" extra on the originating request: a
     * format string with one %s slot for the URL-encoded keyword.
     */
    public void initKeyWords(CrawlerRequestRecord crawlerRequestRecord, CrawlerRequestRecord supportRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportRecord.getInternalDownloadPage();
        Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
        String searchKeyUrl = (String) extras.get("searchKvSourceUrl");
        if (StringUtils.isBlank(searchKeyUrl)) {
            logger.error("search kw source url can not null!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!Objects.equals(msg, "success")) {
            logger.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }
        List<String> contents = json.jsonPath($_type + ".content").all();
        for (String content : contents) {
            JSONObject jsonObject = JSONObject.parseObject(content);
            String keyword = jsonObject.getString("keyword");
            if (StringUtils.isEmpty(keyword)) {
                continue;
            }
            try {
                String searchUrl = String.format(searchKeyUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord ensureRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(searchUrl)
                        .notFilterRecord()
                        .releaseTime(System.currentTimeMillis())
                        .copyResultTags()
                        .copyBizTags()
                        .build();
                ensureRecord.getHttpRequest().addExtra("keyword", keyword);
                ensureRecord.tagsCreator().bizTags().addCustomKV("keywords", keyword);
                crawlerRecords.add(ensureRecord);
            } catch (UnsupportedEncodingException e) {
                // Log the full exception (not just the message) so the failure
                // is diagnosable.
                logger.error("failed to URL-encode keyword [{}]", keyword, e);
            }
        }
    }

    /**
     * Routes a downloaded page to the matching link extractor depending on
     * which URL pattern the request matches, and returns the newly discovered
     * requests.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<CrawlerRequestRecord>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();

        if (!httpPage.isDownloadSuccess() || httpPage.getStatusCode() != 200) {
            logger.error("{} status code : [{}]", requestUrl, httpPage.getStatusCode());
            crawlerRequestRecord.setNeedWashPage(false);
            return parseLinks;
        }
        if (requestUrl.matches(START_URL)) {
            // Search entry page: synthesize the first JSON result page so the
            // pagination chain (parseNextPages) can take over from page 1.
            Object question = getUrlParams(httpPage.getRequest().getUrl()).get("question");
            String nextPageUrl = String.format(NEXTPAGE_FORMAT, question, 1);
            CrawlerRequestRecord pageRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .needWashed(true)
                    // The paging endpoint requires this referer header.
                    .httpHead("referer", String.format(REFERER_FORMAT, question))
                    .copyResultTags()//article
                    .copyBizTags()
                    .build();
            pageRecord.getHttpRequest().addExtra("isWashProductUrl", "yes");
            pageRecord.getHttpRequest().addExtra("parseNextPage", "parseNextPage");
            parseLinks.add(pageRecord);
        }
        if (requestUrl.matches(PAGENEXT_REGEX)) {
            parseNextPages(crawlerRequestRecord, httpPage, parseLinks);
            parseDetails(crawlerRequestRecord, httpPage, parseLinks);
        }
        if (requestUrl.matches(DETAIL_REGEX)) {
            // Product detail page: schedule its comment and price requests.
            parseArticle(crawlerRequestRecord, httpPage, parseLinks);
        }
        if (requestUrl.matches(GET_PRICE_REGEX)) {
            // Price responses are JSONP ("cb({...})"); strip the wrapper.
            String rawText = httpPage.getRawText();
            String data = rawText.substring(rawText.indexOf("(") + 1, rawText.lastIndexOf(")"));
            String flag = null;
            try {
                flag = new Json(data).jsonPath($_type + ".success").get();
            } catch (Exception e) {
                logger.error("failed to read success flag from [{}]", requestUrl, e);
            }
            // Boolean.parseBoolean(null) is false, matching the old deprecated
            // new Boolean(flag) semantics; false means the endpoint has no data.
            if (Boolean.parseBoolean(flag)) {
                crawlerRequestRecord.getHttpRequest().addExtra("success", "true");
                parseLinks.add(crawlerRequestRecord);
            }
        }
        return parseLinks;
    }

    /**
     * Emits the request for the following result page, unless the current page
     * is the last one according to the page bar in the JSON payload.
     */
    public void parseNextPages(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinks) {
        String html = httpPage.getRawText();
        String url = httpPage.getRequest().getUrl();
        Map<String, Object> urlParams = getUrlParams(url);
        Object question = urlParams.get("question");
        Object page = urlParams.get("page");
        int currentPage = Integer.parseInt(String.valueOf(page));
        int nextPage = currentPage + 1;
        String totalPageStr = new Json(html).jsonPath($_type + ".content.pageBar.totalPage").get();
        // StringUtils.isEmpty is null-safe; the old totalPageStr.isEmpty()
        // threw an NPE when the jsonPath lookup returned null.
        int totalPage = StringUtils.isEmpty(totalPageStr) ? 1 : Integer.parseInt(totalPageStr);

        if (nextPage <= totalPage) {
            String nextPageUrl = String.format(NEXTPAGE_FORMAT, question, nextPage);
            CrawlerRequestRecord pageRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .needWashed(true)
                    // The paging endpoint requires this referer header.
                    .httpHead("referer", String.format(REFERER_FORMAT, question))
                    .copyResultTags()//article
                    .copyBizTags()
                    .build();
            pageRecord.getHttpRequest().addExtra("isWashProductUrl", "yes");
            pageRecord.getHttpRequest().addExtra("parseNextPage", "parseNextPage");
            parseLinks.add(pageRecord);
        }
    }

    /*public void parseNextPages(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinks) {
        String html = httpPage.getHtml().get();
        Object question = getUrlParams(httpPage.getRequest().getUrl()).get("question");
        String totalPage = html.substring(html.lastIndexOf("totalPage") + 13, html.indexOf("regionId") - 6).split(",")[0];
        int count = (totalPage.isEmpty() ? 1 : Integer.parseInt(totalPage));
        for (int i = 1; i <= count; i++) {//控制了总页数为四，还需要修改.如果为空，没有下一页？
            String nextPageUrl = String.format(NEXTPAGE_FORMAT, question, i);
            CrawlerRequestRecord pageRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(nextPageUrl)
                    .recordKey(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .needWashed(true)
                    .httpHead("referer", String.format(REFERER_FORMAT, question))
                    .copyResultTags()//article
                    .copyBizTags()
                    .build();
            pageRecord.getHttpRequest().addExtra("isWashProductUrl", "yes");
            pageRecord.getHttpRequest().addExtra("parseNextPage", "parseNextPage");
            parseLinks.add(pageRecord);
        }
    }*/

    /**
     * Parses a JSON result page and emits one detail-page request per listed
     * product.
     */
    public void parseDetails(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinks) {
        Json json = httpPage.getJson();
        int count = 0;
        try {
            count = json.jsonPath($_type + ".content.prodInfo.products").all().size();
        } catch (Exception e) {
            logger.error("no product list in [{}]", httpPage.getRequest().getUrl(), e);
        }
        for (int i = 0; i < count; i++) {
            // Reuse the parsed json instead of re-fetching it every iteration.
            // sUrl is protocol-relative ("//item.gome.com.cn/...").
            String sUrl = json.jsonPath($_type + ".content.prodInfo.products[" + i + "].sUrl").get();
            String productUrl = "https:" + sUrl;
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(productUrl)
                    .httpUrl(productUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()//article
                    .build();
            itemRecord.getHttpRequest().addExtra("isWashProductInfo", "yes");
            parseLinks.add(itemRecord);
        }
    }

    /**
     * For a product detail page, schedules the comment-feed request plus the
     * two price-lookup requests (present/promotional and reference price).
     * Requires the "comment_record_filter_info" KV tag carrying a serialized
     * CrawlerRecord with the filter settings for comment records.
     */
    public void parseArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinks) {
        if (!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")) {
            logger.error("gome crawler comment need to filter information!");
            return;
        }
        String commentUrl_format = "https://ss.gome.com.cn/item/v1/prdevajsonp/appraiseNew/%s/1/all/0/10/flag/appraise/all?callback=all&_=%s";
        // NOTE(review): existence is checked via bizTags().hasKVTag above, but
        // the tag is read through getCategoryTag().getKVTag here — confirm both
        // consult the same tag store.
        KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
        CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(), CrawlerRecord.class);
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        // Detail URLs look like https://item.gome.com.cn/<pId>-<skuId>.html, so
        // path segment 3 is "<pId>-<skuId>.html".
        String pId = requestUrl.split("/")[3].split("-")[0];
        String skuId = requestUrl.split("/")[3].split("-")[1];
        skuId = skuId.substring(0,skuId.lastIndexOf("."));
        String articleKey = pId + skuId;
        String commentUrl = String.format(commentUrl_format, pId, System.currentTimeMillis());
        CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .recordKey(commentUrl)
                .httpUrl(commentUrl)
                .releaseTime(System.currentTimeMillis())
                .notFilterRecord()
                .copyBizTags()
                .copyResultTags()//comment
                .build();
        commentRecord.getHttpRequest().addExtra("isWashComment", "yes");
        commentRecord.getHttpRequest().addExtra("articleKey", articleKey);

        // Carry over the comment-specific filter configuration.
        commentRecord.setFilter(filterInfoRecord.getFilter());
        commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
        //crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().removeLabelTag("comment"); //the parent-level filter must be removed so the two levels filter separately
        parseLinks.add(commentRecord);
        // Price-lookup records for the same article.
        parseLinks.add(getProductRecord(crawlerRequestRecord, pId, skuId, PRICE_FORMAT, "isWashPresentPrice"));//promotional price
        parseLinks.add(getProductRecord(crawlerRequestRecord, pId, skuId, PRICE2_FORMAT, "isWashOriginalPrice"));//reference price
    }

    /**
     * Builds a price-lookup request for a product identified by pId/skuId,
     * using the given URL format and tagging it with the supplied wash marker
     * ("isWashPresentPrice" or "isWashOriginalPrice").
     */
    public CrawlerRequestRecord getProductRecord(CrawlerRequestRecord crawlerRequestRecord, String pId, String skuId, String format, String isWashPrice) {
        String priceUrl = String.format(format, pId, skuId, System.currentTimeMillis());
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .recordKey(priceUrl)
                .httpUrl(priceUrl)
                .releaseTime(System.currentTimeMillis())
                .needParsed(true)
                .copyBizTags()
                .copyResultTags()//article
                .build();
        record.getHttpRequest().addExtra(isWashPrice, "yes");
        record.getHttpRequest().addExtra("articleKey", pId + skuId);
        return record;
    }

    /**
     * Dispatches a downloaded page to the matching wash routine based on the
     * extras set when the request was created and the configured result data
     * types; returns all cleaned data items.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDatas = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRequestRecord.tagsCreator().resultTags();
        Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
        boolean product_url = crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("product_url");
        // Boolean.parseBoolean(null) is false, so a missing "success" extra
        // skips the price wash — same behavior as the old deprecated
        // new Boolean((String) ...) without the boxing constructor.
        boolean priceAvailable = Boolean.parseBoolean((String) extras.get("success"));

        if (extras.get("isWashProductUrl") != null && product_url) {
            // A result page: extract the product URLs it lists.
            crawlerDatas.addAll(washArticleUrl(crawlerRequestRecord, httpPage));
        }
        if (crawlerResultTags.hasDataType(article)) {
            if (extras.get("isWashProductInfo") != null) {
                crawlerDatas.add(washArticle(crawlerRequestRecord, httpPage));
            }
            if (extras.get("isWashOriginalPrice") != null && priceAvailable) {
                crawlerDatas.add(washProductPrice(crawlerRequestRecord, httpPage, Field_Original_Price));
            }
            if (extras.get("isWashPresentPrice") != null && priceAvailable) {
                crawlerDatas.add(washProductPrice(crawlerRequestRecord, httpPage, Field_Present_Price));
            }
        }
        if (crawlerResultTags.hasDataType(comment) && extras.get("isWashComment") != null && crawlerRequestRecord.getHttpRequest().getUrl().matches(COMMENT_REGEX)) {
            crawlerDatas.addAll(washComment(crawlerRequestRecord, httpPage));
        }
        return crawlerDatas;
    }

    /**
     * Washes a JSON result page into one CrawlerData per listed product,
     * carrying the product's detail URL keyed by pId+skuId.
     */
    public List<CrawlerData> washArticleUrl(CrawlerRequestRecord articleUrlRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDatas = new ArrayList<>();
        String requestUrl = articleUrlRecord.getHttpRequest().getUrl();
        if (requestUrl.matches(PAGENEXT_REGEX) && articleUrlRecord.getHttpRequest().getExtras().get("parseNextPage") != null) {
            Json json = httpPage.getJson();
            List<String> products = Collections.emptyList();
            try {
                products = json.jsonPath(($_type + ".content.prodInfo.products")).all();
            } catch (Exception e) {
                // Previously a failed lookup left the list null and the loop
                // below threw an NPE; default to an empty list instead.
                logger.error("no products in result page [{}]", requestUrl, e);
            }
            if (products == null) {
                products = Collections.emptyList();
            }
            for (String product : products) {
                JSONObject jsonObject = JSONObject.parseObject(product);
                String pId = jsonObject.getString("pId");
                String skuId = jsonObject.getString("skuId");
                String sUrl = jsonObject.getString("sUrl");
                CrawlerData proUrlData = CrawlerData.builder()
                        .data(articleUrlRecord, httpPage)
                        .dataId(StringUtils.joinWith("-", articleUrlRecord.getDomain(), article, pId + skuId))
                        .releaseTime(System.currentTimeMillis())
                        .addContentKV(Field_Urls, "https:" + sUrl)
                        .resultLabelTag(article)
                        .build();
                crawlerDatas.add(proUrlData);
            }
        }
        return crawlerDatas;
    }

    /**
     * Washes a product detail page into a CrawlerData holding the title and
     * the gallery image URLs.
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String url = httpPage.getRequest().getUrl();
        // Path segment "<pId>-<skuId>.html" collapses to the key "<pId><skuId>".
        String urlSub = url.split("/")[3];
        String articleKey = urlSub.substring(0, urlSub.lastIndexOf(".")).replace("-", "");
        String title = httpPage.getHtml().xpath("//div[@class=\"hgroup\"]/h1/text()").get();
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"pic-small j-gRbox j-pichover clearfix\"]/ul/li/a/img/@gome-src").nodes();
        // StringBuilder: the buffer is method-local, so the synchronized
        // StringBuffer bought nothing.
        StringBuilder imgUrls = new StringBuilder();
        for (Selectable node : nodes) {
            // gome-src values are protocol-relative; "\\x01" is the literal
            // two-character separator used downstream between image URLs.
            imgUrls.append("https:").append(node.get()).append("\\x01");
        }
        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article, articleKey))
                .releaseTime(System.currentTimeMillis())
                .addContentKV(Field_Title, title)
                .addContentKV(Field_Images, imgUrls.toString())
                .resultLabelTag(article)
                .build();
    }
    /**
     * Washes a JSONP price response into a CrawlerData carrying either the
     * reference price or the promotional price for the article.
     *
     * @param priceType the content field to fill (Field_Original_Price or
     *                  Field_Present_Price)
     */
    public CrawlerData washProductPrice(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, String priceType) {
        String rawText = httpPage.getRawText();
        // Strip the JSONP callback wrapper to obtain the raw JSON document.
        String jsonStr = rawText.substring(rawText.indexOf("(") + 1, rawText.lastIndexOf(")"));
        Json json = new Json(jsonStr);
        String salesPrice = null;
        try {
            salesPrice = json.jsonPath($_type + ".result.gomePrice.salePrice").get();
        } catch (Exception e) {
            logger.error("no sale price in [{}]", httpPage.getRequest().getUrl(), e);
        }
        Object articleKey = crawlerRequestRecord.getHttpRequest().getExtras().get("articleKey");
        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article, articleKey))
                .releaseTime(System.currentTimeMillis())
                // "0.0" means "no data" on the site; store null rather than a
                // bogus zero price.
                .addContentKV(priceType, "0.0".equals(salesPrice) ? null : salesPrice)
                .resultLabelTag(article)
                .build();
    }
//清洗评论信息
    public List<CrawlerData> washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDatas = new ArrayList<>();
        String raxText = httpPage.getRawText();
        String jsonStr = raxText.substring(raxText.indexOf("(") + 1, raxText.lastIndexOf(")"));//去除前面的all，方便转json格式
        Json json = new Json(jsonStr);

        List<String> evalist = null;
        try {
            evalist = json.jsonPath($_type + ".evaList.Evalist").all();
        } catch (Exception e) {
            logger.error(e.getMessage());
        }
        if (evalist != null && evalist.size() > 0) {
            for (String list : evalist) {
                Json listJson = new Json(list);
                String content = listJson.jsonPath($_type + ".appraiseElSum").get();
                String timeStr = listJson.jsonPath($_type + ".post_time").get();
                String authorName = null;
                try {
                    authorName = listJson.jsonPath($_type + ".loginname").get();
                } catch (Exception e) {
                    logger.error(httpPage.getRequest().getUrl() + "链接中，该评论没有用户名(loginname)");
                }
                //String commentKey = listJson.jsonPath($_type + ".appraiseId").get();//会出现重复的值
                String likes = listJson.jsonPath($_type + ".apprnum").get();
                String articleKey = crawlerRequestRecord.getHttpRequest().getExtras().get("articleKey").toString();
                String dataId = StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment, articleKey);
                Long time = null;
                try {
                    time = DateUtils.parseDate(timeStr, "yyyy-MM-dd hh:mm").getTime();
                } catch (ParseException e) {
                    e.printStackTrace();
                }
                CrawlerData crawlerData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(dataId)
                        //.parentId(StringUtils.joinWith("-",crawlerRequestRecord.getDomain(),CrawlerEnum.CrawlerDataType.comment.enumVal(),articleKey))
                        .releaseTime(time)
                        .addContentKV(Field_Author, authorName)
                        .addContentKV(Field_Content, content)
                        .addContentKV(Field_I_Likes, likes)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                        .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                        .resultLabelTag(comment)
                        .build();
                crawlerDatas.add(crawlerData);
                //清洗评论回复信息
                List<String> replayAll = listJson.jsonPath($_type + ".replyall").all();
                for (String rall : replayAll) {
                    Json rJson = new Json(rall);
                    String repContent = rJson.jsonPath($_type + ".content").get();
                    String repTimeStr = rJson.jsonPath($_type + ".replyTime").get();
                    String repAuthor = rJson.jsonPath($_type + ".user").get();
                    String repId = rJson.jsonPath($_type + ".replyId").get();
                    String repKey = StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.comment, repId);
                    Long repTime = null;
                    try {
                        repTime = DateUtils.parseDate(repTimeStr, "yyyy-MM-dd hh:mm").getTime();
                    } catch (ParseException e) {
                        e.printStackTrace();
                    }
                    CrawlerData repCrawlerData = CrawlerData.builder()
                            .data(crawlerRequestRecord, httpPage)
                            .dataId(repKey)
                            .releaseTime(repTime)
                            .addContentKV(Field_Author, repAuthor)
                            .addContentKV(Field_Content, repContent)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                            .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                            .resultLabelTag(CrawlerEnum.CrawlerDataType.comment)
                            .build();

                    crawlerDatas.add(repCrawlerData);
                }
            }
        }
        return crawlerDatas;
    }

    /**
     * Parses the query string of a URL into a name→value map. Pairs without
     * exactly one "=" are skipped; values are not URL-decoded.
     *
     * @return a (possibly empty) map; never null
     */
    private Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<>();
        // indexOf/substring instead of split: "http://x?" used to throw an
        // ArrayIndexOutOfBoundsException because split drops the empty tail.
        int queryStart = url.indexOf('?');
        String query = queryStart >= 0 ? url.substring(queryStart + 1) : url;
        if (StringUtils.isBlank(query)) {
            // Previously returned null here, which made callers such as
            // parseLinks NPE on map.get(...); an empty map is safe.
            return map;
        }
        for (String pair : query.split("&")) {
            String[] kv = pair.split("=");
            if (kv.length == 2) {
                map.put(kv[0], kv[1]);
            }
        }
        return map;
    }

}
