package com.chance.cc.crawler.development.scripts.tianya.tianyanews;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Html;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;


/**
 * Crawler script for the Tianya BBS search results of the keyword "nike".
 * <p>
 * It walks the paginated search-result pages, queues every listed post as a
 * detail request, and washes each post page into three kinds of records:
 * the article itself, its interaction counters, and its replies/remarks.
 */
public class TianYaScript extends CrawlerCommonScript {
    private static final String DOMAIN = "tianya";
    private static final String SITE = "product";
    // Regex for search-result (list) pages, including "next page" links.
    private static final String START_URL_REGEX = "https://search.tianya.cn/bbs\\?q=nike&pn=\\d+&s=4";
    // Regex for post (detail) pages.
    private static final String ARTICLE_URL_REGEX = "http://bbs.tianya.cn/post-\\S+-\\d+-1.shtml";
    private static final Logger logger = LoggerFactory.getLogger(TianYaScript.class);
    // Template used to build the URL of the next search-result page (%s = page number).
    private static final String NEXTPAGE_FORMAT = "https://search.tianya.cn/bbs?q=nike&pn=%s&s=4";
    // Maximum number of times an empty/failed page download is re-queued.
    private static final int MAX_RETRY_COUNT = 5;
    // Biz-tag key counting how often the same page came back empty.
    private static final String RETRY_COUNT_KEY = "ReadTimeOutCounts";
    // Placeholder text the search page renders when there are no results.
    private static final String EMPTY_RESULT_TEXT = "没有找到含有&amp;lsquo;nike&amp;rsquo;的帖子。";
    // All timestamps on the site are 24-hour "yyyy-MM-dd HH:mm:ss".
    private static final String TIME_PATTERN = "yyyy-MM-dd HH:mm:ss";

    @Override
    public void initUrlRegulars() {
        addUrlRegular(START_URL_REGEX);
        addUrlRegular(ARTICLE_URL_REGEX);
    }

    /**
     * Accepts only records whose "site" category tag matches this script's site.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        // Constant-first comparison: a missing "site" tag yields false, not an NPE.
        return SITE.equalsIgnoreCase(categoryTag.getKVTagStrVal("site"));
    }

    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * Extracts follow-up requests from a downloaded page: the next search-result
     * page plus the detail pages listed on it. An empty result page or a failed
     * download is re-queued for another attempt, up to {@link #MAX_RETRY_COUNT}
     * times, tracked via the {@link #RETRY_COUNT_KEY} biz tag.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<>();

        Selectable xpath = httpPage.getHtml().xpath("//*[@id=\"main\"]/div[2]/ul/li[2]/text()");
        String checkUrl = httpPage.getRequest().getUrl();
        // A "no results" placeholder or a failed download both trigger the retry path.
        boolean emptyResult = xpath != null && EMPTY_RESULT_TEXT.equals(xpath.get());
        if (emptyResult || doHttpPageCheck(crawlerRequestRecord, httpPage)) {
            logger.error("download page empty, check your link {}", checkUrl);
            String readTimeOutCounts =
                    crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(RETRY_COUNT_KEY);
            if (readTimeOutCounts != null) {
                int count = Integer.parseInt(readTimeOutCounts);
                if (count >= MAX_RETRY_COUNT) {
                    logger.error("界面重复下载次数超过五次，判断网站无法加载数据内容");
                    return parseLinks;
                }
                crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(RETRY_COUNT_KEY, String.valueOf(count + 1));
            } else {
                crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(RETRY_COUNT_KEY, "1");
            }
            // Re-queue the same record for another download attempt, unfiltered.
            crawlerRequestRecord.setNeedWashPage(false);
            crawlerRequestRecord.tagsCreator().requestTags().removeRequestType(CrawlerEnum.CrawlerRequestType.filter);
            parseLinks.add(crawlerRequestRecord);
            return parseLinks;
        }
        String url = crawlerRequestRecord.getHttpRequest().getUrl();
        // Only list pages yield new links; detail pages are handled by washPage().
        if (url.matches(START_URL_REGEX)) {
            parseNextPage(crawlerRequestRecord, httpPage, parseLinks);
            parseDetails(crawlerRequestRecord, httpPage, parseLinks);
        }
        return parseLinks;
    }

    /**
     * Queues the next search-result page. The pager renders the "下一页" label as
     * a plain span (instead of a link) on the last page; in that case no next-page
     * request is generated.
     */
    public void parseNextPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinks) {
        String url = crawlerRequestRecord.getHttpRequest().getUrl();
        long nowTime = System.currentTimeMillis();
        Object pn = getUrlParams(url).get("pn");
        String pnStr = pn != null ? (String) pn : "";
        int pnInt = pnStr.isEmpty() ? 0 : Integer.parseInt(pnStr); // current page number
        String nextLabel = httpPage.getHtml().xpath("//*[@id=\"main\"]/div[3]/span/text()").get();
        // BUGFIX: the original compared Strings with '==', which is never true for
        // freshly extracted text, so the last-page guard never fired.
        boolean isLoaderNext = !"下一页".equals(nextLabel);
        if (isLoaderNext) {
            String nextPageUrl = String.format(NEXTPAGE_FORMAT, pnInt + 1);
            CrawlerRequestRecord nextPageRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .needWashed(false)
                    .releaseTime(nowTime)
                    .copyBizTags()
                    .build();
            parseLinks.add(nextPageRecord);
        }
    }

    /**
     * Extracts every post link on a search-result page, together with the release
     * time shown in the listing, and queues a detail request for each. The article
     * key ("board-id", taken from the post URL) is attached as a request extra.
     */
    public void parseDetails(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parseLinks) {
        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"searchListOne\"]/ul/li[*]/div/h3/a/@href").nodes();
        if (nodes == null) {
            // BUGFIX: the original "retry" re-ran the identical xpath and then
            // iterated a possibly-null list; bail out instead.
            return;
        }
        int i = 1; // 1-based index of the current <li>, used to select its time cell
        for (Selectable itemNode : nodes) {
            String itemUrl = itemNode.get();
            // "post-<board>-<id>-1.shtml" -> "<board>-<id>" uniquely keys the article.
            String articleKey = itemUrl.substring(itemUrl.indexOf("-") + 1, itemUrl.lastIndexOf("-"));
            Long time = null;
            String timeStr = httpPage.getHtml().xpath("//div[@class=\"searchListOne\"]/ul/li[" + i + "]/p/span[1]/text()").get();
            if (StringUtils.isNotEmpty(timeStr)) {
                try {
                    // BUGFIX: pattern used lowercase 'hh' (12-hour clock) here while
                    // every other parse in this script uses 24-hour 'HH'.
                    time = DateUtils.parseDate(timeStr, TIME_PATTERN).getTime();
                } catch (ParseException e) {
                    logger.error("cannot parse release time '{}' of {}", timeStr, itemUrl, e);
                }
            }
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(itemUrl)
                    .httpUrl(itemUrl)
                    .releaseTime(time)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            itemRecord.getHttpRequest().addExtra("articleKey", articleKey);
            parseLinks.add(itemRecord);
            i++; // BUGFIX: the index was never advanced, so every item reused li[1]'s time
        }
    }

    /**
     * Washes a post detail page into article, interaction and comment records,
     * but only when the record requests one of those data types.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if (crawlerResultTags.hasDataType(article) || crawlerResultTags.hasDataType(interaction) || crawlerResultTags.hasDataType(comment)) {
            crawlerDataList.add(washArticle(crawlerRequestRecord, httpPage));
            crawlerDataList.add(washInteraction(crawlerRequestRecord, httpPage));
            crawlerDataList.addAll(washComments(crawlerRequestRecord, httpPage));
        }
        return crawlerDataList;
    }

    /**
     * Extracts the post title, author, body text, image links and publish time
     * into a single article record keyed by "<domain>-<type>-<articleKey>".
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        Html html = httpPage.getHtml();
        String title = html.xpath("//*[@id=\"post_head\"]/h1/span[1]/span").get();
        String author = html.xpath("//*[@id=\"post_head\"]/div[2]/div[2]/span[1]/a[1]").get();
        Long time = null;
        String articleKey = (String) crawlerRequestRecord.getHttpRequest().getExtras().get("articleKey");
        String dataId = StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey);
        // Concatenate all content fragments of the opening post.
        List<String> texts = html.xpath("//*[@id=\"bd\"]/div[4]/div[1]/div/div[2]/div[1]").all();
        StringBuilder buffer = new StringBuilder();
        for (String text : texts) {
            buffer.append(text);
        }
        String content = buffer.toString().trim();
        // Join image URLs with the \x01 field separator expected downstream.
        List<String> links = html.xpath("//*[@id=\"bd\"]/div[4]/div[1]/div/div[2]/div[1]/img/@src").all();
        StringBuilder imgBuffer = new StringBuilder();
        for (String link : links) {
            imgBuffer.append(link).append("\\x01");
        }
        String imageLinks = imgBuffer.toString().trim();
        // Header reads e.g. "时间：2019-01-01 10:00:00"; the value follows the
        // full-width colon.
        String timeStr = html.xpath("//*[@id=\"post_head\"]/div[2]/div[2]/span[2]").get();
        if (timeStr != null && timeStr.contains("：")) {
            try {
                String s = timeStr.split("：")[1];
                time = DateUtils.parseDate(s, TIME_PATTERN).getTime();
            } catch (ParseException e) {
                logger.error("cannot parse article time '{}'", timeStr, e);
            }
        }
        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(dataId)
                .releaseTime(time)
                .addContentKV(Field_Title, title)
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Content, content)
                .addContentKV(Field_Images, imageLinks)
                .build();
    }

    /**
     * Extracts the post's reply counter into an interaction record whose parent
     * is the article record.
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        // Counter reads e.g. "点击：123"; the value follows the full-width colon.
        String commentsStr = httpPage.getHtml().xpath("//*[@id=\"post_head\"]/div[2]/div[2]/span[4]").get();
        String comments = null;
        if (commentsStr != null && commentsStr.contains("：")) {
            comments = commentsStr.split("：")[1];
        }
        String articleKey = (String) crawlerRequestRecord.getHttpRequest().getExtras().get("articleKey");
        String dataId = StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey);
        String parentId = StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey);
        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(dataId)
                .parentId(parentId)
                .addContentKV(Field_I_Comments, comments)
                .build();
    }

    /**
     * Washes every reply ("回帖") of the post and, nested below each reply, its
     * remarks ("评论"). Entries missing author, id or time are skipped; remark
     * scanning stops at the first fully-missing entry.
     */
    public List<CrawlerData> washComments(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> commentDatas = new ArrayList<>();
        Html html = httpPage.getHtml();
        // Title attribute reads like "共12个回帖和3个评论" (reply and remark counts).
        String number = html.xpath("//*[@id=\"post_head\"]/div[2]/div[2]/span[4]/@title").get();
        if (StringUtils.isEmpty(number)) {
            logger.error("no reply counter found on {}", httpPage.getRequest().getUrl());
            return commentDatas;
        }
        String invitation = number.substring(number.indexOf("共") + 1, number.indexOf("个")); // reply count
        String remark = number.substring(number.indexOf("和") + 1, number.lastIndexOf("个")); // remark count
        String articleKey = (String) crawlerRequestRecord.getHttpRequest().getExtras().get("articleKey");
        String parentId = StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey);
        int invCount = StringUtils.isNotEmpty(invitation) ? Integer.parseInt(invitation) : 0;
        // Loop-invariant: hoisted out of the reply loop.
        int comCount = StringUtils.isNotEmpty(remark) ? Integer.parseInt(remark) : 0;
        for (int i = 1; i <= invCount; i++) {
            // Replies carry their 1-based ordinal as the element id.
            String author = html.xpath("//div[@id=\"" + i + "\"]/div[1]/div[2]/span/a[1]/text()").get();
            List<String> all = html.xpath("//*[@id=\"" + i + "\"]//div[@class=\"bbs-content\"]").all();
            StringBuilder content = new StringBuilder();
            for (String cons : all) {
                content.append(cons);
            }
            Long time = null;
            String replyId = html.xpath("//*[@id=\"" + i + "\"]/@replyid").get();
            String dataId = StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), replyId);
            String timeStr = html.xpath("//div[@id=\"" + i + "\"]/@js_restime").get();
            try {
                if (StringUtils.isNotEmpty(timeStr))
                    time = DateUtils.parseDate(timeStr, TIME_PATTERN).getTime();
            } catch (ParseException e) {
                logger.error("cannot parse reply time '{}'", timeStr, e);
            }
            // Skip incomplete replies instead of emitting partial records
            // (content is a fresh StringBuilder and can never be null).
            if (author == null || replyId == null || time == null)
                continue;
            CrawlerData invitationData = CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(dataId)
                    .parentId(parentId)
                    .releaseTime(time)
                    .addContentKV(Field_Content, content.toString())
                    .addContentKV(Field_Author, author)
                    .build();
            commentDatas.add(invitationData);
            for (int j = 1; j <= comCount; j++) {
                String remarkAuthor = html.xpath("//div[@id='" + i + "']/div[2]/div[2]/div[3]/div[1]/ul/li[" + j + "]/p/a[1]/text()").get();
                List<String> remarkParts = html.xpath("//*[@id='" + i + "']/div[2]/div[2]/div[3]/div[1]/ul/li[" + j + "]/span/text()").all();
                StringBuilder remarkContent = new StringBuilder();
                for (String rcons : remarkParts) {
                    remarkContent.append(rcons);
                }
                if (StringUtils.isNotEmpty(remarkAuthor)) {
                    // Strip carriage-return entities, escaped newlines and spaces.
                    remarkAuthor = remarkAuthor.replace("&#xD;", "").replace("：&#xD;", "").replace("\\n", "").replace(" ", "");
                    // NOTE(review): the original also took a substring of the remark
                    // COUNT string here and discarded the result — a pure no-op,
                    // removed; it was probably meant to trim remarkAuthor — verify.
                }
                Long remarkTime = null;
                String remarkDataId = html.xpath("//*[@id='" + i + "']/div[2]/div[2]/div[3]/div[1]/ul/li[" + j + "]/@id").get();
                String remarkTimeStr = html.xpath("//*[@id='" + i + "']/div[2]/div[2]/div[3]/div[1]/ul/li[" + j + "]/p/span[2]/text()").get();
                if (StringUtils.isNotEmpty(remarkDataId))
                    remarkDataId = remarkDataId.substring(remarkDataId.indexOf("-") + 1);
                if (StringUtils.isNotEmpty(remarkTimeStr)) {
                    try {
                        remarkTime = DateUtils.parseDate(remarkTimeStr, TIME_PATTERN).getTime();
                    } catch (ParseException e) {
                        logger.error("cannot parse remark time '{}'", remarkTimeStr, e);
                    }
                }
                // Past the last real remark every field is missing — stop scanning.
                if (remarkAuthor == null || remarkDataId == null || remarkTime == null) {
                    break;
                }
                // NOTE(review): unlike article/reply ids, the remark dataId is not
                // prefixed with "<domain>-<type>-"; kept as-is to preserve existing
                // stored keys — confirm whether the prefix was intended.
                CrawlerData remarkData = CrawlerData.builder()
                        .data(crawlerRequestRecord, httpPage)
                        .dataId(remarkDataId)
                        .parentId(parentId)
                        .releaseTime(remarkTime)
                        .addContentKV(Field_Content, remarkContent.toString())
                        .addContentKV(Field_Author, remarkAuthor)
                        .build();
                commentDatas.add(remarkData);
            }
        }
        return commentDatas;
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // No post-processing required for this site.
    }

    /**
     * Parses the query string of {@code url} into a key/value map.
     * Always returns a map (possibly empty, never null) so callers may chain
     * {@code get(...)} safely.
     */
    private Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<>();
        String param = url;
        if (url.contains("?")) {
            param = url.split("\\?")[1];
        }
        if (StringUtils.isBlank(param)) {
            // BUGFIX: previously returned null, which NPE'd parseNextPage's
            // getUrlParams(url).get("pn") chain.
            return map;
        }
        for (String s : param.split("&")) {
            String[] p = s.split("=");
            if (p.length == 2) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

    /**
     * Returns true when the download should be treated as failed: non-200 status,
     * downloader-reported failure, or an empty response body.
     */
    private boolean doHttpPageCheck(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        HttpRequest lastRequest = crawlerRequestRecord.getHttpRequest();
        String lastRequestUrl = lastRequest.getUrl();
        int statusCode = httpPage.getStatusCode();
        if (statusCode != 200) {
            logger.error("download page {} error, status code is {}", lastRequestUrl, statusCode);
            return true;
        }
        if (!httpPage.isDownloadSuccess()) {
            logger.error("download page failed, check your link {}", lastRequestUrl);
            return true;
        }
        if (StringUtils.isBlank(httpPage.getRawText())) {
            logger.error("download page empty, check your link {}", lastRequestUrl);
            return true;
        }
        return false;
    }
}
