package com.chance.cc.crawler.development.scripts.bbs;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.record.builder.CrawlerRequestRecordBuilder;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2020/11/18 17:53
 * @Description 浙大论坛-梧桐树
 **/
public class BBSCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(BBSCrawlerScript.class);

    /** "bd" ids of the forum boards crawled from the entrance page. */
    private static final List<String> modelList = Arrays.asList("94", "41", "99", "11", "20", "42", "71", "73", "76", "32", "43", "75", "33", "34", "35", "84", "81", "82", "83", "85", "91", "93");

    private static final String entranceUrl = "http://www.zju1.com/";//entrance URL
    private static final String modelNextUrl = "http://www.zju1.com/cxz.asp\\S*";//board listing / next-page URL pattern
    private static final String articleUrl = "http://www.zju1.com/showbbs.asp\\?id=\\d+&totable=\\d+";//article detail URL pattern
    private static final String commentUrl = "http://www.zju1.com/showbbs.asp\\?topage=\\d+&bd=\\d+&id=\\d+&totable=\\d+";//comment URL pattern when comments span several pages
    private static final String commentUrlT = "http://www.zju1.com/showbbs.asp\\?id=\\d+&totable=\\d+#";//comment URL pattern when comments fit on a single page
    private static final String commentArticleUrl = "http://www.zju1.com/showbbs.asp\\?id=\\d+&totable=\\d+##p=\\S*|http://www.zju1.com/showbbs.asp\\?topage=\\d+&bd=\\d+&id=\\d+&totable=\\d+#p\\S*";//comment-anchor URL pattern

    /** Timestamp layout used by the forum on every page. */
    private static final String DATE_FORMAT = "yyyy/MM/dd HH:mm:ss";
    /** Matches a "2020/11/18 17:53:00"-style timestamp embedded in free text. */
    private static final Pattern TIME_PATTERN = Pattern.compile("\\d{4}/\\d*/\\d* \\d*:\\d*:\\d*");

    /**
     * Script domain identifier.
     *
     * @return the fixed domain name "bbs"
     */
    @Override
    public String domain() {
        return "bbs";
    }

    /**
     * Registers the URL patterns that route requests into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(entranceUrl);
        addUrlRegular(modelNextUrl);
        addUrlRegular(articleUrl);
        addUrlRegular(commentUrl);
        addUrlRegular(commentUrlT);
        addUrlRegular(commentArticleUrl);
    }

    /**
     * Input-data check gate: every record that matches a registered pattern is accepted.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return always true — no extra filtering for this site
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }


    /**
     * Parses follow-up links out of a downloaded page, dispatching on which of the
     * known URL patterns the request URL matches.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return requests to schedule next (possibly empty, never null)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        String requestUrl = httpPage.getRequest().getUrl();

        if (requestUrl.matches(entranceUrl)) {
            // Entrance page: schedule page 1 of every configured board.
            for (String boardId : modelList) {
                String modelLink = requestUrl + "cxz.asp?bd=" + boardId + "&topage=1";

                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(modelLink)
                        .recordKey(modelLink)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .build();

                parsedLinks.add(itemRecord);
            }
        } else if (requestUrl.matches(modelNextUrl)) {
            // Board listing page: schedule the next listing page. "topage" is always the
            // last query parameter on these URLs, so split[1] is the bare page number.
            String[] split = requestUrl.split("topage=");
            String nextPageUrl = split[0] + "topage=" + (Integer.parseInt(split[1]) + 1);

            CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .build();
            parsedLinks.add(turnPageRequest);

            // Walk the listing table rows; row 0 is the table header, hence i starts at 1.
            List<Selectable> nodes = httpPage.getHtml().xpath("//table[@class=\"biao a2\"]/tbody/tr").nodes();
            for (int i = 1; i < nodes.size(); i++) {
                String itemUrl = nodes.get(i).xpath("./td[@class=\"zt2\"]/a/@href").get();
                if (StringUtils.isEmpty(itemUrl)) {
                    continue;
                }

                String releaseTime = nodes.get(i).xpath("./td/@title").get();
                if (StringUtils.isEmpty(releaseTime)) {
                    continue;
                }

                try {
                    itemUrl = entranceUrl + StringEscapeUtils.unescapeHtml4(itemUrl);
                    long releaseTimeToLong = DateUtils.parseDate(releaseTime, DATE_FORMAT).getTime();

                    CrawlerRequestRecordBuilder crawlerRequestRecordBuilder = CrawlerRequestRecord.builder()
                            .itemPageRequest(crawlerRequestRecord)
                            .recordKey(itemUrl)
                            .httpUrl(itemUrl)
                            .releaseTime(releaseTimeToLong)
                            .resultLabelTag(article)
                            .resultLabelTag(interaction);

                    Map<String, Object> extras = new HashMap<>();
                    // Board id sits between "bd=" and the first "&" of the listing URL.
                    String bd = requestUrl.substring(requestUrl.indexOf("bd") + 3, requestUrl.indexOf("&"));
                    extras.put("bd", bd);

                    // The cell before the @title cell holds a "views/comments" counter pair.
                    List<String> all = nodes.get(i).xpath("./td[@title]/preceding-sibling::td[1]//text()").all();
                    String views = "0";
                    String comments = "0";
                    StringBuilder counterText = new StringBuilder();
                    for (String data : all) {
                        counterText.append(StringEscapeUtils.unescapeHtml4(data).replaceAll("\r", "").replaceAll("\n", ""));
                    }
                    if (StringUtils.isNotEmpty(counterText)) {
                        String[] counters = counterText.toString().split("/");
                        views = counters[0];
                        if (counters.length > 1) {
                            comments = counters[1];
                        }
                    }
                    extras.put("views", views);
                    extras.put("comments", comments);

                    CrawlerRequestRecord itemRecord = crawlerRequestRecordBuilder.build();
                    itemRecord.getHttpRequest().setExtras(extras);
                    parsedLinks.add(itemRecord);
                } catch (ParseException e) {
                    log.warn("时间格式错误！时间是：{}", releaseTime, e);
                }
            }
        } else if (requestUrl.matches(articleUrl)) {
            // Article page: schedule the LAST comment page (comment pages are walked backwards).
            String nextUrl = "";
            List<String> all = httpPage.getHtml().xpath("//td[@class=\"page\"]/a/@href").all();
            if (all.size() > 1) {
                // Multiple pager links: the last href is absolute from the script path.
                nextUrl = requestUrl.split("\\?")[0] + StringEscapeUtils.unescapeHtml4(all.get(all.size() - 1));
            } else if (all.size() == 1) {
                // Single pager link: it is a fragment appended to the current URL.
                nextUrl = requestUrl + StringEscapeUtils.unescapeHtml4(all.get(all.size() - 1));
            }
            String title = StringEscapeUtils.unescapeHtml4(httpPage.getHtml().xpath("//td[@class=\"zt4\"]//text()").get());

            CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(nextUrl)
                    .httpUrl(nextUrl)
                    .resultLabelTag(comment)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .build();
            Map<String, Object> extras = new HashMap<>();
            extras.putAll(httpPage.getRequest().getExtras());
            extras.put("title", title);
            turnPageRequest.getHttpRequest().setExtras(extras);

            parsedLinks.add(turnPageRequest);
        } else if (requestUrl.matches(commentUrl) || requestUrl.matches(commentUrlT)) {
            // Comment page: schedule the PREVIOUS comment page (page numbers count down to 1).
            String nextUrl = "";
            List<Selectable> nodes = httpPage.getHtml().xpath("//table[@class=\"biao\"]").nodes();
            if (requestUrl.matches(commentUrl)) {
                String[] split = requestUrl.split("topage=");
                String[] split1 = split[1].split("&bd");
                nextUrl = split[0] + "topage=" + (Integer.parseInt(split1[0]) - 1) + "&bd" + split1[1];
            }
            if (StringUtils.isNotEmpty(nextUrl)) {
                // Use the newest reply's timestamp as the release time of the next request.
                // extractReleaseTime is null-safe: a missing <p class="ds"> no longer NPEs.
                String releaseTime = extractReleaseTime(nodes.get(nodes.size() - 1).xpath(".//p[@class=\"ds\"]/text()").get());
                if (StringUtils.isNotEmpty(releaseTime)) {
                    try {
                        long releaseTimeToLong = DateUtils.parseDate(releaseTime, DATE_FORMAT).getTime();
                        CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                                .itemPageRequest(crawlerRequestRecord)
                                .recordKey(nextUrl)
                                .httpUrl(nextUrl)
                                .resultLabelTag(comment)
                                .releaseTime(releaseTimeToLong)
                                .build();

                        turnPageRequest.getHttpRequest().setExtras(httpPage.getRequest().getExtras());
                        parsedLinks.add(turnPageRequest);
                    } catch (ParseException e) {
                        log.warn("时间格式错误！时间是：{}", releaseTime, e);
                    }
                }
            }
        }
        return parsedLinks;
    }

    /**
     * Washes a downloaded page into result data according to the result tags attached
     * when the request was scheduled. Null wash results (unparseable records) are
     * dropped instead of being forwarded downstream.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();

        int statusCode = page.getStatusCode();
        String requestUrl = page.getRequest().getUrl();
        if (statusCode == 200) {
            if (crawlerResultTags.hasDataType(article)) {
                addIfNotNull(crawlerDataList, washArticle(crawlerRecord, page));
            }

            if (crawlerResultTags.hasDataType(interaction)) {
                addIfNotNull(crawlerDataList, washInteraction(crawlerRecord, page));
            }

            if (crawlerResultTags.hasDataType(comment)) {
                // Table 0 is the original post; tables 1..n are replies, washed newest-first.
                List<Selectable> nodes = page.getHtml().xpath("//table[@class=\"biao\"]").nodes();
                if (nodes.size() > 1) {
                    for (int i = nodes.size() - 1; i > 0; i--) {
                        addIfNotNull(crawlerDataList, washComment(crawlerRecord, page, nodes.get(i)));
                    }
                } else {
                    log.info("文章(“" + requestUrl + "”)没有回复！");
                }
            }
        } else {
            log.error("页面(“" + requestUrl + "”)下载错误！状态码：" + statusCode);
        }
        return crawlerDataList;
    }

    /** Adds {@code data} to {@code list} unless the wash produced nothing. */
    private static void addIfNotNull(List<CrawlerData> list, CrawlerData data) {
        if (data != null) {
            list.add(data);
        }
    }

    /**
     * Washes the original post of an article page.
     *
     * @return the article data, or null when the post lacks a parsable timestamp
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        // The first "biao" table holds the original post; the rest are replies.
        Selectable selectable = httpPage.getHtml().xpath("//table[@class=\"biao\"]").nodes().get(0);

        String contents = joinText(selectable.xpath(".//td[@class=\"tznr\"]//text()").all());

        String author = selectable.xpath(".//td[@class=\"a2 se3\"]/b/a/text()").get();
        String releaseTime = extractReleaseTime(selectable.xpath(".//p[@class=\"ds\"]/text()").get());
        if (StringUtils.isEmpty(releaseTime)) {
            return null;  // no parsable timestamp -> record unusable
        }

        String itemUrl = httpPage.getRequest().getUrl();
        // Article id sits between "id=" and the trailing "&" (e.g. showbbs.asp?id=123&totable=4).
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("id") + 3, itemUrl.lastIndexOf("&"));

        try {
            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, DATE_FORMAT).getTime())
                    .addContentKV(Field_Content, contents)
                    .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(httpPage.getHtml().xpath("//td[@class=\"zt4\"]//text()").get()))
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            log.warn("时间格式错误！时间是：{}", releaseTime, e);
            return null;
        }
    }

    /**
     * Washes the interaction counters (views / comments) that were stashed into the
     * request extras by {@link #parseLinks} when the listing page was parsed.
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        // Same key extraction as washArticle so parentId/dataId stay aligned.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("id") + 3, itemUrl.lastIndexOf("&"));

        Map<String, Object> extras = httpPage.getRequest().getExtras();
        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                .resultLabelTag(interaction)
                .url(itemUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_I_Comments, (String) extras.get("comments"))
                .addContentKV(Field_I_Views, (String) extras.get("views"))
                .build();
    }

    /**
     * Washes one reply table into a comment record.
     *
     * @param node one "biao" table node holding a single reply
     * @return the comment data, or null when the reply lacks a parsable timestamp or permalink
     */
    public CrawlerData washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, Selectable node) {
        String releaseTime = extractReleaseTime(node.xpath(".//p[@class=\"ds\"]/text()").get());
        if (StringUtils.isEmpty(releaseTime)) {
            return null;  // no parsable timestamp -> skip this reply
        }

        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("id") + 3, itemUrl.lastIndexOf("&"));

        String author = node.xpath(".//td[@class=\"a2 se3\"]/b/a/text()").get();
        String contents = joinText(node.xpath(".//td[@class=\"tznr\"]//text()").all());

        // The reply's permalink carries its own id in the "bbsid" query parameter.
        // (Renamed from "commentUrl", which shadowed the static URL-pattern field.)
        String replyUrl = StringEscapeUtils.unescapeHtml4(node.xpath(".//p[@class=\"ds\"]/a/@href").get());
        if (StringUtils.isEmpty(replyUrl) || replyUrl.indexOf("bbsid") < 0 || replyUrl.indexOf("&") < 0) {
            return null;  // no permalink -> no stable comment id
        }
        String commentId = replyUrl.substring(replyUrl.indexOf("bbsid") + 6, replyUrl.indexOf("&"));

        try {
            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), comment.enumVal(), commentId))
                    .resultLabelTag(comment)
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, DATE_FORMAT).getTime())
                    .addContentKV(Field_Content, contents)
                    .addContentKV(Field_Author, author)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.filter)
                    .requestLabelTag(CrawlerEnum.CrawlerRequestType.result)
                    .build();
        } catch (ParseException e) {
            log.warn("时间格式错误！时间是：{}", releaseTime, e);
            return null;
        }
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // No post-processing required for this script.
    }

    /**
     * HTML-unescapes and concatenates the given text fragments, separated by spaces.
     *
     * @param parts text fragments extracted via xpath, never null
     * @return the joined, trimmed text
     */
    private static String joinText(List<String> parts) {
        StringBuilder sb = new StringBuilder();
        for (String part : parts) {
            sb.append(StringEscapeUtils.unescapeHtml4(part)).append(" ");
        }
        return sb.toString().trim();
    }

    /**
     * Extracts the first forum-style timestamp from {@code input}.
     *
     * @param input arbitrary text, may be null
     * @return the matched timestamp, or null when input is null or holds no timestamp
     */
    private static String extractReleaseTime(String input) {
        if (input == null) {
            return null;
        }
        Matcher matcher = TIME_PATTERN.matcher(input);
        return matcher.find() ? matcher.group(0) : null;
    }

}
