package com.chance.cc.crawler.development.scripts.tianya;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.KVTag;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.*;

/**
 * @author lt
 * @version 1.0
 * @date 2021-03-22 13:24:52
 * @email okprog@sina.com
 */
/**
 * Crawler script for the Tianya BBS forum (bbs.tianya.cn).
 *
 * <p>Drives the full crawl pipeline for this site: seeds board-list URLs from
 * keywords, pages through board listings, follows article (post) pages and
 * their pagination, fans out to per-reply comment APIs and author-profile
 * pages, and finally washes article / comment / interaction data out of the
 * downloaded pages.
 */
public class TianYaBBSCrawlerScript extends CrawlerCommonScript {

    private static final Logger logger = LoggerFactory.getLogger(TianYaBBSCrawlerScript.class);

    // URL recognition patterns for the page types this script handles.
    public static final String indexUrlRegex = "https?://bbs\\.tianya\\.cn/";
    public static final String listIndexUrlRegex = "https?://bbs\\.tianya\\.cn/list\\.jsp\\?item=\\S*&order=1";
    public static final String listPageUrlRegex = "https?://bbs\\.tianya\\.cn/list\\.jsp\\?item=\\S*&nextid=\\d*";
    public static final String articleUrlRegex = "https?://bbs\\.tianya\\.cn/post-\\S*-\\d*-\\d*\\.shtml";
    public static final String authorInfoUrlRegex = "https?://www\\.tianya\\.cn/\\d*";
    public static final String commentUrlRegex = "https?://bbs\\.tianya\\.cn/api\\S*";

    // URL templates used to build outgoing requests.
    public static final String listIndexUrlFormat = "http://bbs.tianya.cn/list.jsp?item=%s&order=1";
    public static final String listPageUrlFormat = "http://bbs.tianya.cn/list.jsp?item=%s&nextid=%s";
    public static final String articleUrlFormat = "http://bbs.tianya.cn/post-%s-%s-%s.shtml";
    public static final String authorInfoUrlFormat = "http://www.tianya.cn/%s";//uid
    public static final String commentUrlFormat = "http://bbs.tianya.cn/api?method=bbs.api.getCommentList&params.item=%s&params.articleId=%s&params.replyId=%s&params.pageNum=%s";

    /** "site" biz-tag value this script is responsible for; see {@link #crawlerCheck}. */
    private static final String scriptSite = "bbs";

    /** Matches "点击：1234" fragments in question-style page headers (view count). */
    private static final Pattern CLICK_COUNT_PATTERN = Pattern.compile("点击：\\d*");
    /** Matches "yyyy-MM-dd HH:mm:ss" timestamps embedded in page text. */
    private static final Pattern PUBLISH_TIME_PATTERN =
            Pattern.compile("\\d{4}-\\d{2}-\\d{2}\\s*\\d{2}:\\d{2}:\\d{2}");

    /**
     * Dispatches a downloaded page to the matching link parser based on the
     * request URL, and returns the follow-up requests discovered on it.
     *
     * @param crawlerRequestRecord the record that produced {@code httpPage}
     * @param httpPage             the downloaded page
     * @return the follow-up requests; never {@code null} (empty when nothing matched)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        if (404 == httpPage.getStatusCode()){
            // Page is gone; do not retry and do not wash.
            logger.info("status code is 404");
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        if (doHttpPageCheck(crawlerRequestRecord,httpPage)){
            // Download was incomplete: re-queue the same record (unfiltered) for retry.
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(filter);
            parsedLinks.add(crawlerRequestRecord);
            return parsedLinks;
        }
        String lastRequestUrl = lastRequest.getUrl();
        if (lastRequestUrl.matches(indexUrlRegex)){
            // Site index: seed one board-list request per configured keyword (board item id).
            Map<String, Object> extras = lastRequest.getExtras();
            List<String> keywords = castList(extras.get("keywords"),String.class);
            for (String keyword : keywords) {
                String listIndexUrl = String.format(listIndexUrlFormat,keyword);
                CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(listIndexUrl)
                        .recordKey(listIndexUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .build();
                requestRecord.tagsCreator().bizTags().addKeywords(keyword);
                parsedLinks.add(requestRecord);
            }
            return parsedLinks;
        }
        if (lastRequestUrl.matches(listIndexUrlRegex) || lastRequestUrl.matches(listPageUrlRegex)){
            return parseListLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(articleUrlRegex)){
            return parseArticleLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        if (lastRequestUrl.matches(commentUrlRegex)){
            return parseCommentLinks(crawlerRequestRecord,httpPage,parsedLinks);
        }
        // Fix: previously returned null here; callers expect a collection.
        return parsedLinks;
    }

    /**
     * Parses a comment-list API response: on the first page also fetches the
     * reply author's profile, and queues the next API page while the current
     * page is full (10 entries per page).
     */
    private List<CrawlerRequestRecord> parseCommentLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        crawlerRequestRecord.setNeedWashPage(true);
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String item = (String) extras.get("item");
        String articleKey = (String) extras.get("articleKey");
        String replyId = (String) extras.get("replyId");
        Map<String, Object> replyMap = (Map<String, Object>) extras.get("replyMap");
        String authorId = (String) replyMap.get("authorId");
        Map<String, Object> urlParams = getUrlParams(httpRequest.getUrl());
        int pageNum = Integer.parseInt((String) urlParams.get("params.pageNum"));
        if (pageNum == 1) {
            // Only fetch the author profile once per reply, on the first API page.
            String authorUrl = String.format(authorInfoUrlFormat,authorId);
            CrawlerRequestRecord authorInfoRecord = getAuthorInfoRecord(crawlerRequestRecord, authorUrl);
            if (null != authorInfoRecord){
                parsedLinks.add(authorInfoRecord);
            }
        }
        // Page turning: a full page (10 replies) implies there may be another one.
        JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
        JSONArray objects = pageObj.getJSONArray("data");
        if (objects.size() == 10){
            pageNum += 1;
            String nextApiUrl = String.format(commentUrlFormat,item,articleKey,replyId,pageNum);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(nextApiUrl)
                    .recordKey(nextApiUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .copyBizTags()
                    .needParsed(true)
                    .needWashed(false)
                    .resultLabelTag(comment)
                    .build();
            // Copy (not share) the extras so later mutation of one record cannot leak into the other.
            commentRecord.getHttpRequest().setExtras(copyExtras(extras));
            parsedLinks.add(commentRecord);
        }
        return parsedLinks;
    }

    /**
     * Parses an article page: stashes article identifiers into the request
     * extras, fetches the author's profile on page 1, follows the article's
     * own pagination, and (when comment crawling is configured) queues one
     * comment-API request per reply node on the page.
     */
    private List<CrawlerRequestRecord> parseArticleLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        crawlerRequestRecord.setNeedWashPage(true);
        // Extract the page number from the URL (post-<item>-<articleKey>-<page>.shtml) to detect page 1.
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        String articleKey = lastRequestUrl.split("-")[2];
        String item = lastRequestUrl.split("-")[1];
        crawlerRequestRecord.getHttpRequest().addExtra("articleUrl",lastRequestUrl);
        crawlerRequestRecord.getHttpRequest().addExtra("articleKey",articleKey);
        crawlerRequestRecord.getHttpRequest().addExtra("item",item);
        Html html = httpPage.getHtml();
        int curPage = Integer.parseInt(lastRequestUrl.substring(lastRequestUrl.lastIndexOf("-") + 1).split("\\.")[0]);
        if (curPage == 1){
            // Build the internal-download record for the author's profile page.
            String authorUrl = html.xpath("//div[@id=\"post_head\"]/div/div[@class=\"atl-info\"]/span/a[1]/@href|//div[@class=\"wd-question\"]/div/span[@class=\"ml5\"]/a[1]/@href").get();
            CrawlerRequestRecord authorInfoRecord = getAuthorInfoRecord(crawlerRequestRecord,authorUrl);
            if (null != authorInfoRecord){
                parsedLinks.add(authorInfoRecord);
            }
        }
        // Page turning: the last pager link is the "next page" anchor (relative URL).
        List<String> allPagers = html.xpath("//div[@id=\"post_head\"]//div[@class=\"atl-pages\"]/form/a/@href").all();
        if (null != allPagers && allPagers.size() > 0){
            String nextPageUrl = allPagers.get(allPagers.size() -1);
            nextPageUrl = "http://bbs.tianya.cn" + nextPageUrl;
            if (!lastRequestUrl.equalsIgnoreCase(nextPageUrl)){
                CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(nextPageUrl)
                        .recordKey(nextPageUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .build();
                parsedLinks.add(nextPageRecord);
            }
        }
        // Decide whether comments should be crawled for this article.
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().scheduleTags().getCategoryTag();
        if (categoryTag.getLabelTag(comment.enumVal()) != null) {
            if(!crawlerRequestRecord.tagsCreator().bizTags().hasKVTag("comment_record_filter_info")){
                logger.error("tianya crawler comment need to filter information!");
                return parsedLinks;
            }
            KVTag filterInfoTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
            CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfoTag.getVal(),CrawlerRecord.class);
            // One comment-API record per reply node on this article page.
            List<Selectable> commentNodes = html.xpath("//div[@class=\"atl-main\"]/div[@class=\"atl-item\"]").nodes();
            for (Selectable commentNode : commentNodes) {
                CrawlerRequestRecord commentRecord = getCommentRecords(crawlerRequestRecord,lastRequestUrl, articleKey, item, filterInfoRecord, commentNode);
                if (null != commentRecord){
                    parsedLinks.add(commentRecord);
                }
            }
        }
        return parsedLinks;
    }

    /**
     * Builds the comment-API request for a single reply node, packing the
     * reply's parsed fields into the request extras (consumed later by
     * {@link #washCommentData}).
     *
     * @param filterInfoRecord currently unused here — NOTE(review): kept for
     *                         signature stability; confirm whether filtering
     *                         against it was ever intended.
     * @return the request record, or {@code null} when building it failed
     *         (e.g. unparseable reply timestamp)
     */
    private CrawlerRequestRecord getCommentRecords(CrawlerRequestRecord crawlerRequestRecord,String articleUrl, String articleKey, String item, CrawlerRecord filterInfoRecord, Selectable commentNode) {
        String replyId = commentNode.xpath("./@replyid").get();
        String floor = commentNode.xpath("./@id").get();
        String publishTime = commentNode.xpath("./@js_restime").get();
        String author = commentNode.xpath("./@js_username").get();
        String authorId = commentNode.xpath("./@hostid").get();
        String postClient = commentNode.xpath("./div[@class=\"atl-content\"]/div/div[@class=\"atl-reply\"]/a[@class=\"a-link\"]/text()").get();
        if (StringUtils.isBlank(postClient)){
            postClient = "";
        }
        String likes = commentNode.xpath("./div[@class=\"atl-content\"]/div/div[@class=\"atl-reply\"]/span[@class=\"tuijian hasZan\"]/@data-sum").get();
        if (StringUtils.isBlank(likes)){
            likes = "0";
        }
        // Reply count is rendered like "...(12)"; default to "0" when absent/malformed.
        String commentsStr = commentNode.xpath("./div[@class=\"atl-content\"]/div/div[@class=\"atl-reply\"]/a[@class=\"a-link-2 ir-remark\"]/text()").get();
        String comments;
        try {
            comments = commentsStr.split("\\(")[1].split("\\)")[0];
        } catch (Exception e) {
            comments = "0";
        }
        List<String> contents = commentNode.xpath("./div[@class=\"atl-content\"]/div/div[@class=\"bbs-content\"]//text()").all();
        List<String> images = commentNode.xpath("./div[@class=\"atl-content\"]/div/div[@class=\"bbs-content\"]//img/@original").all();
        Map<String,Object> replyMap = new HashMap<>();
        replyMap.put("replyId",replyId);
        replyMap.put("floor",floor);
        replyMap.put("publishTime",publishTime);
        replyMap.put("author",author);
        replyMap.put("authorId",authorId);
        replyMap.put("postClient",postClient);
        replyMap.put("likes",likes);
        replyMap.put("comments",comments);
        replyMap.put("contents",contents);
        replyMap.put("images",images);
        Map<String,Object> replyExtras = new HashMap<>();
        replyExtras.put("articleUrl",articleUrl);
        replyExtras.put("articleKey",articleKey);
        replyExtras.put("item",item);
        replyExtras.put("replyId",replyId);
        replyExtras.put("replyMap",replyMap);
        String commentUrl = String.format(commentUrlFormat,item,articleKey,replyId,1);
        CrawlerRequestRecord commentRecord;
        try {
            commentRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(commentUrl)
                    .recordKey(commentUrl)
                    .releaseTime(DateUtils.parseDate(publishTime,"yyyy-MM-dd HH:mm:ss").getTime())
                    .copyBizTags()
                    .needParsed(true)
                    .needWashed(false)
                    .resultLabelTag(comment)
                    .resultLabelTag(interaction)
                    .build();
            commentRecord.getHttpRequest().setExtras(replyExtras);
        }catch (Exception e){
            logger.error("build comment record failed for {}", commentUrl, e);
            return null;
        }
        return commentRecord;
    }

    /**
     * Builds an internal-download request for an author profile page; the
     * result is merged back in {@link #afterInternalDownload}.
     */
    private CrawlerRequestRecord getAuthorInfoRecord(CrawlerRequestRecord crawlerRequestRecord, String authorUrl) {
        return CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .httpUrl(authorUrl)
                .recordKey(authorUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .requestLabelTag(internalDownload)
                .notFilterRecord()
                .build();
    }

    /**
     * Parses a board-list page: queues the next list page (if any) plus one
     * article request per listed post, using the post's timestamp as release time.
     */
    private List<CrawlerRequestRecord> parseListLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        CrawlerRequestRecord nextPageRecord = getNextPageRecord(crawlerRequestRecord,httpPage,lastRequestUrl);
        if (null != nextPageRecord){
            parsedLinks.add(nextPageRecord);
        }
        Html html = httpPage.getHtml();
        List<Selectable> itemNodes = html.xpath("//div[@class=\"mt5\"]/table/tbody/tr").nodes();
        for (Selectable itemNode : itemNodes) {
            String url = itemNode.xpath("./td[1]/a/@href").get();
            if (StringUtils.isBlank(url)){
                // Header / separator rows carry no link.
                continue;
            }
            // Relative post URL looks like /post-<item>-<articleKey>-<page>.shtml.
            String[] urlSplits = url.split("-");
            String item = urlSplits[1];
            String articleKey = urlSplits[2];
            String itemUrl = String.format(articleUrlFormat,item,articleKey,1);
            String publishTime = itemNode.xpath("./td/@title").get();
            try {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(itemUrl)
                        .recordKey(itemUrl)
                        .releaseTime(DateUtils.parseDate(publishTime,"yyyy-MM-dd HH:mm").getTime())
                        .resultLabelTag(article)
                        .resultLabelTag(interaction)
                        .copyBizTags()
                        .build();
                parsedLinks.add(itemRecord);
            }catch (Exception e){
                logger.error("build article record failed for {}", itemUrl, e);
            }
        }
        return parsedLinks;
    }

    /**
     * Builds the request for the next board-list page by extracting the
     * {@code nextid} value from the pager link.
     *
     * @return the next-page record, or {@code null} when there is no next page
     */
    private CrawlerRequestRecord getNextPageRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, String lastRequestUrl) {
        Html html = httpPage.getHtml();
        Map<String, Object> urlParams = getUrlParams(lastRequestUrl);
        String item = (String) urlParams.get("item");

        String nextPageUrl = html.xpath("//div[@class=\"links\"]/a[@rel]/@href").get();
        if (StringUtils.isBlank(nextPageUrl)){
            logger.info("no next page url");
            return null;
        }
        // The pager href ends with "...nextid=<id>"; rebuild a canonical list URL from it.
        String nextId = nextPageUrl.substring(nextPageUrl.lastIndexOf("=") + 1);
        nextPageUrl = String.format(listPageUrlFormat,item,nextId);
        return CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextPageUrl)
                .recordKey(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .build();
    }

    /**
     * Merges the results of internal downloads (author profile pages) back
     * into the main record's extras. A failed internal download re-queues the
     * main record for retry instead.
     */
    @Override
    public void afterInternalDownload(CrawlerRequestRecord crawlerRecord, List<CrawlerRequestRecord> internalDownloadRecords, List<CrawlerRequestRecord> links) {
        for (CrawlerRequestRecord internalDownloadRecord : internalDownloadRecords) {
            HttpPage internalDownloadPage = internalDownloadRecord.getInternalDownloadPage();
            if (doHttpPageCheck(internalDownloadRecord, internalDownloadPage)){
                crawlerRecord.setNeedWashPage(false);
                crawlerRecord.tagsCreator().requestTags().removeRequestType(filter);
                links.add(crawlerRecord);
                continue;
            }
            HttpRequest httpRequest = crawlerRecord.getHttpRequest();
            String url = internalDownloadRecord.getHttpRequest().getUrl();
            Html html = internalDownloadPage.getHtml();
            if (url.matches(authorInfoUrlRegex)){
                // Author stats consumed later by washArticleData / washCommentData via extras.
                String concerns = html.xpath("//div[@class=\"relate-link\"]/div[1]/p/a/text()").get();
                String follows = html.xpath("//div[@class=\"relate-link\"]/div[2]/p/a/text()").get();
                String signTime = html.xpath("//div[@class=\"userinfo\"]/p[2]/text()").get();
                httpRequest.addExtra("concerns",concerns);
                httpRequest.addExtra("follows",follows);
                httpRequest.addExtra("signTime",signTime);
            }

        }
    }

    /**
     * Washes a downloaded page into structured {@link CrawlerData} items,
     * dispatching on the record's result data types (article and/or comment).
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        HttpRequest httpRequest = crawlerRequestRecord.getHttpRequest();
        Map<String, Object> extras = httpRequest.getExtras();
        String requestUrl = httpRequest.getUrl();
        String articleKey = (String) extras.get("articleKey");
        String articleUrl = (String) extras.get("articleUrl");
        String item = (String) extras.get("item");
        String rawText = httpPage.getRawText();
        Html html = httpPage.getHtml();
        if (StringUtils.isBlank(rawText)){
            logger.error("httpPage is empty !");
            return crawlerDataList;
        }
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(article)){
            washArticleData(crawlerRequestRecord, httpPage, crawlerDataList, extras, requestUrl, articleKey, articleUrl, item, html);
        }

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(comment)){
            washCommentData(crawlerRequestRecord, httpPage, crawlerDataList, httpRequest, extras, articleKey, articleUrl, item);
        }
        return crawlerDataList;
    }

    /**
     * Emits one comment data item (from the reply fields carried in extras),
     * optionally its interaction metrics, and one data item per nested reply
     * found in the comment-API JSON payload.
     */
    private void washCommentData(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerData> crawlerDataList, HttpRequest httpRequest, Map<String, Object> extras, String articleKey, String articleUrl, String item) {
        Map<String, Object> replyMap = (Map<String, Object>) extras.get("replyMap");
        String replyId = (String)replyMap.get("replyId");
        String floor = (String)replyMap.get("floor");
        String publishTime = (String)replyMap.get("publishTime");
        String author = (String)replyMap.get("author");
        String authorId = (String)replyMap.get("authorId");
        String postClient = (String)replyMap.get("postClient");
        String likes = (String)replyMap.get("likes");
        String comments = (String)replyMap.get("comments");
        List<String> contents = (List<String>)replyMap.get("contents");
        StringBuilder sbContent = new StringBuilder();
        for (String content : contents) {
            sbContent.append(content.trim());
        }
        List<String> images = (List<String>)replyMap.get("images");
        StringBuilder sbImage = new StringBuilder();
        for (String image : images) {
            // "\x01" (literal backslash sequence) is the downstream image-URL separator.
            sbImage.append(image).append("\\x01");
        }
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord,httpPage)
                .url(articleUrl)
                .dataId(StringUtils.joinWith("-", domain(),site,comment.enumVal(),item,replyId))
                .parentId(StringUtils.joinWith("-",domain(),site,article.enumVal(),item,articleKey))
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_Author,author)
                .addContentKV(Field_Author_Id,authorId)
                .addContentKV(Field_Floor,floor)
                .addContentKV(Tag_Field_PostClient,postClient)
                .addContentKV(Field_Content,sbContent.toString())
                .addContentKV(Field_Images,sbImage.toString())
                .resultLabelTag(comment)
                .requestLabelTag(filter)
                .requestLabelTag(result)
                .build();
        crawlerDataList.add(crawlerData);

        // Interaction metrics for this comment.
        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
            CrawlerData crawlerInteractionData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(articleUrl)
                    .dataId(StringUtils.joinWith("-",domain(),site,interaction.enumVal(),item,replyId))
                    .parentId(StringUtils.joinWith("-",domain(),site,comment.enumVal(),item,replyId))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_I_Comments,comments)
                    .addContentKV(Field_I_Likes,likes)
                    .resultLabelTag(interaction)
                    .requestLabelTag(filter)
                    .requestLabelTag(result)
                    .build();
            crawlerDataList.add(crawlerInteractionData);
        }

        // Replies to this comment, taken from the API JSON payload.
        try {
            JSONObject pageObj = JSONObject.parseObject(httpPage.getRawText());
            JSONArray replyObjs = pageObj.getJSONArray("data");
            for (Object replyObj : replyObjs) {
                JSONObject reply = (JSONObject)replyObj;
                String replyAuthor = reply.getString("author_name");
                String replyAuthorId = reply.getString("author_id");
                String replyNId = reply.getString("id");
                String replyContent = reply.getString("content");
                String replyTime = reply.getString("comment_time");
                CrawlerData commentData = CrawlerData.builder()
                        .data(crawlerRequestRecord,httpPage)
                        .url(articleUrl)
                        .dataId(StringUtils.joinWith("-",domain(),site,comment.enumVal(),item,replyNId))
                        .parentId(StringUtils.joinWith("-", domain(),site,comment.enumVal(),item,replyId))
                        // Fix: pattern was "yyyy-MM-dd HH:ss:mm" (minutes/seconds swapped).
                        .releaseTime(DateUtils.parseDate(replyTime.split("\\.")[0],"yyyy-MM-dd HH:mm:ss").getTime())
                        .addContentKV(Field_Author,replyAuthor)
                        .addContentKV(Field_Author_Id,replyAuthorId)
                        .addContentKV(Field_Content,replyContent)
                        .requestLabelTag(filter)
                        .requestLabelTag(result)
                        .resultLabelTag(comment)
                        .build();
                crawlerDataList.add(commentData);
            }
        } catch (Exception e) {
            logger.error("parse comment`s replies failed , check {}",httpRequest.getUrl(), e);
        }
    }

    /**
     * Extracts the article's fields (title, author, counters, content, images)
     * from the page HTML and emits one article data item plus, when requested,
     * its interaction metrics. Handles both the classic post layout and the
     * "wd-question" layout as fallbacks in the XPath expressions.
     */
    private void washArticleData(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerData> crawlerDataList, Map<String, Object> extras, String requestUrl, String articleKey, String articleUrl, String item, Html html) {
        String title = html.xpath("//div[@id=\"post_head\"]/h1/span/span/text()|//h1/span/text()").get();
        String author = html.xpath("//div[@id=\"post_head\"]/div[@class=\"atl-menu clearfix js-bbs-act\"]/div[@class=\"atl-info\"]/span/a/@uname|//div[@class=\"wd-question\"]/div/span[@class=\"ml5\"]/a[1]/text()").get();
        String authorId = html.xpath("//div[@id=\"post_head\"]/div[@class=\"atl-menu clearfix js-bbs-act\"]/div[@class=\"atl-info\"]/span/a/@uid|//div[@class=\"wd-question\"]/div[@class=\"host\"]").get();
        String comments = html.xpath("//div[@id=\"post_head\"]/div[@class=\"atl-menu clearfix js-bbs-act\"]/@js_replycount").get();
        if (StringUtils.isBlank(comments)){
            comments = "0";
        }
        String views = html.xpath("//div[@id=\"post_head\"]/div[@class=\"atl-menu clearfix js-bbs-act\"]/@js_clickcount").get();
        if (StringUtils.isBlank(views)){
            // Question-style layout: scrape the "点击：<n>" fragment instead.
            views = "1";
            List<String> strings = html.xpath("//div[@class=\"wd-question\"]/div/span[@class=\"ml5\"]/text()").all();
            StringBuilder sb = new StringBuilder();
            for (String string : strings) {
                sb.append(string);
            }
            Matcher matcher = CLICK_COUNT_PATTERN.matcher(sb.toString());
            while (matcher.find()){
                views = matcher.group(0).split("：")[1];
            }
        }
        String publishTime = html.xpath("//div[@id=\"post_head\"]/div[@class=\"atl-menu clearfix js-bbs-act\"]/@js_posttime").get();
        if (StringUtils.isBlank(publishTime)){
            // Question-style layout: scan the header text for a full timestamp.
            List<String> strings = html.xpath("//div[@class=\"wd-question\"]/div/span[@class=\"ml5\"]/text()").all();
            StringBuilder sb = new StringBuilder();
            for (String string : strings) {
                sb.append(string);
            }
            Matcher matcher = PUBLISH_TIME_PATTERN.matcher(sb.toString());
            while (matcher.find()){
                String time = matcher.group(0);
                try {
                    publishTime = String.valueOf(DateUtils.parseDate(time,"yyyy-MM-dd HH:mm:ss").getTime());
                } catch (ParseException e) {
                    logger.error("parse publish time failed: {}", time, e);
                }
            }
        }
        List<String> allContents = html.xpath("//div[@class=\"atl-item host-item\"]/div[@class=\"atl-content\"]/div/div[@class=\"bbs-content clearfix\"]//text()|//div[@class=\"wd-question\"]//div[@class=\"text\"]//text()").all();
        StringBuilder sbContent = new StringBuilder();
        for (String allContent : allContents) {
            sbContent.append(allContent.trim());
        }
        List<String> allImages = html.xpath("//div[@class=\"atl-item host-item\"]/div[@class=\"atl-content\"]/div/div[@class=\"bbs-content clearfix\"]//img/@original|//div[@class=\"wd-question\"]//div[@class=\"text\"]//img/@original").all();
        StringBuilder sbImage = new StringBuilder();
        for (String allImage : allImages) {
            sbImage.append(allImage).append("\\x01");
        }
        String likes = html.xpath("//div[@class=\"atl-item host-item\"]/div[@class=\"atl-content\"]/div/div[@class=\"action-tyf\"]/div/p/em/span/@data-number").get();
        if (StringUtils.isBlank(likes)){
            likes = "0";
        }
        String postClient = html.xpath("//div[@class=\"atl-item host-item\"]/div[@class=\"atl-content\"]/div/div/div[@class=\"atl-reply\"]/a[@class=\"a-link\"]/text()").get();
        if (StringUtils.isBlank(postClient)){
            postClient = "";
        }
        String floor = "楼主";
        String follows = (String) extras.get("follows");
        String signTime = (String) extras.get("signTime");
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        // js_posttime is presumably already epoch millis — TODO confirm against live pages.
        // Guard against a blank value (neither attribute nor fallback regex matched),
        // which previously blew up in `new Long(publishTime)`.
        long releaseTime = StringUtils.isBlank(publishTime)
                ? crawlerRequestRecord.getReleaseTime()
                : Long.parseLong(publishTime);
        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord,httpPage)
                .url(requestUrl)
                .dataId(StringUtils.joinWith("-",domain(),site,article.enumVal(),item,articleKey))
                .releaseTime(releaseTime)
                .addContentKV(Field_Title,title)
                .addContentKV(Field_Author,author)
                .addContentKV(Field_Author_Id,authorId)
                .addContentKV(Field_Content,sbContent.toString())
                .addContentKV(Field_Images,sbImage.toString())
                .addContentKV(Field_Floor,floor)
                .addContentKV(Field_Author_Follows,follows)
                .addContentKV(Field_Author_Sign_In,signTime)
                .addContentKV(Tag_Field_PostClient,postClient)
                .resultLabelTag(article)
                .build();
        crawlerDataList.add(crawlerData);

        if (crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)){
            CrawlerData crawlerInteractionData = CrawlerData.builder()
                    .data(crawlerRequestRecord,httpPage)
                    .url(articleUrl)
                    .dataId(StringUtils.joinWith("-",domain(),site,interaction.enumVal(),item,articleKey))
                    .parentId(StringUtils.joinWith("-",domain(),site,article.enumVal(),item,articleKey))
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV(Field_I_Comments,comments)
                    .addContentKV(Field_I_Likes,likes)
                    .addContentKV(Field_I_Views,views)
                    .resultLabelTag(interaction)
                    .build();
            crawlerDataList.add(crawlerInteractionData);
        }
    }


    /** Registers all URL patterns this script is able to parse. */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(indexUrlRegex);
        addUrlRegular(listIndexUrlRegex);
        addUrlRegular(listPageUrlRegex);
        addUrlRegular(articleUrlRegex);
        addUrlRegular(commentUrlRegex);
    }

    /**
     * Returns {@code true} when the record's "site" biz tag matches this script.
     * Constant-first comparison avoids an NPE when the tag is missing.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        return scriptSite.equalsIgnoreCase(crawlerSite);
    }

    /** No post-processing needed for this site. */
    @Override
    public void afterExecute(CrawlerRecordContext context) {

    }

    @Override
    public String domain() {
        return "tianya";
    }

    /**
     * Checks whether the page downloaded successfully and completely.
     * @param crawlerRequestRecord last record
     * @param httpPage page
     * @return {@code true} when the download is broken (non-200, failed, or empty body)
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        int statusCode = httpPage.getStatusCode();
        if (statusCode != 200){
            logger.error("download page {} error, status code is {}",lastRequestUrl,statusCode);
            return true;
        }
        if (!httpPage.isDownloadSuccess()){
            logger.error("download page failed, check your link {}",lastRequestUrl);
            return true;
        }
        if (StringUtils.isBlank(httpPage.getRawText())){
            logger.error("download page empty, check your link {}",lastRequestUrl);
            return true;
        }
        return false;
    }

    /**
     * Splits a URL's query string into a key/value map.
     * @param url e.g. http://*.*.com?aa=11&bb=22&cc=33
     * @return parameter map; never {@code null} (empty when there is no query),
     *         so callers can look up keys without a null check
     */
    private Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<>();
        String param = url;
        if (url.contains("?")){
            param = url.split("\\?")[1];
        }
        if (StringUtils.isBlank(param)) {
            // Fix: previously returned null, which NPE'd every caller.
            return map;
        }
        for (String s : param.split("&")) {
            String[] p = s.split("=");
            if (p.length == 2) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

    /**
     * Returns a shallow copy of the given extras map so records do not share
     * a mutable map instance.
     */
    public static Map<String, Object> copyExtras(Map<String,Object> inExtras){
        return new HashMap<>(inExtras);
    }

    /**
     * Casts an untyped object into a {@code List<T>}, element by element.
     * @return the typed list; empty when {@code obj} is not a list (previously
     *         {@code null}, which NPE'd iterating callers)
     * @throws ClassCastException when an element is not of type {@code clazz}
     */
    public static <T> List<T> castList(Object obj, Class<T> clazz)
    {
        List<T> result = new ArrayList<>();
        if (obj instanceof List<?>)
        {
            for (Object o : (List<?>) obj)
            {
                result.add(clazz.cast(o));
            }
        }
        return result;
    }
}
