package com.chance.cc.crawler.development.scripts.bbs1;

import com.alibaba.fastjson.JSON;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;

public class BBs2CrawlerScript extends CrawlerCommonScript {
    private Logger log = LoggerFactory.getLogger(BBs2CrawlerScript.class);
    // Regex path for next-page matching (original note).
    public static String temporary = null;// temporary storage; NOTE(review): never read in this file — timeJudge's parameter shadows it. Confirm before removing.
    // List-page counter, incremented by analysisNextPage to build the next page URL.
    // NOTE(review): mutable instance state — unsafe if one script instance serves several threads; confirm scheduler guarantees.
    private Integer next = 1;
    public static final String listUrlPrefix = "http://bbs1.people.com.cn/board/1/";
    private static final String URL = "http://bbs1.people.com.cn/post/1/1/2/";// prefix used to rebuild canonical detail URLs (see washArticle)
    private static final String HTML = ".html";
    private static final String DETAILS = "http://bbs1.people.com.cn/\\S*post/\\d{1}/\\d{1}/\\d{1}/\\d*\\.html";// post detail page pattern

    private static final String TURNPAGE = "http://bbs1.people.com.cn/board/1\\S*";// list / turn-page pattern

    /**
     * Identifies the domain this script handles.
     *
     * @return the script's domain key
     */
    public String domain() {
        return "bbs2";
    }

    /**
     * Registers the URL regex patterns that route downloaded pages into this script.
     */
    public void initUrlRegulars() {
        // List / turn-page URLs.
        addUrlRegular(TURNPAGE);
        // Post detail pages.
        addUrlRegular(DETAILS);
        // Main article-content endpoint.
        addUrlRegular("http://bbs1.people.com.cn/content_sync.do\\?postId=\\d{9}");
    }

    /**
     * Input-data gate: only records that pass this check enter the script.
     * This implementation accepts every record unconditionally.
     *
     * @param crawlerRequestRecord the incoming request record (not inspected)
     * @return always {@code true}
     */
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }

    /**
     * Parses follow-up request links out of a downloaded page.
     *
     * List pages (TURNPAGE) yield: the next list page, one article-content
     * request per row, and one interaction request per row. Detail pages
     * (DETAILS) reached via an interaction record yield one request per
     * comment node.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return the newly discovered requests (possibly empty)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<CrawlerRequestRecord>();
        String requestUrl = httpPage.getRequest().getUrl();
        if (requestUrl.matches(TURNPAGE)) {
            // Queue the next list page first.
            parsedLinks.add(this.analysisNextPage(crawlerRequestRecord));
            // One node per post title on the list page.
            List<Selectable> nodes = httpPage.getHtml().xpath("//p[@class=\"treeTitle\"]").nodes();
            for (Selectable node : nodes) {
                String itemUrl = node.xpath("./a/@href").get();
                // Guard BEFORE slicing the href: it may be null or blank
                // (the original sliced first, which NPE'd on missing hrefs).
                if (StringUtils.isBlank(itemUrl)) {
                    continue;
                }
                // Content-sync URL rebuilt from the post id embedded in the href.
                // NOTE(review): assumes a fixed-length href (post id at chars 37-45) — confirm.
                String contentUrl = "http://bbs1.people.com.cn/content_sync.do?postId=" + itemUrl.substring(37, 46);
                // Rows prefixed "[-]" carry the date in a different span.
                String type = node.xpath("./span[1]/text()").get();
                Long releaseTime = this.initReleaseTime(type, node);
                String author = node.xpath("./span/a[@target=\"_blank\"]/font").get();
                // The title text node shifts position on some rows; fall back to the plain text.
                String title = node.xpath("./a[@class=\"treeReply\"]//text()[2]").get();
                if (title == null) {
                    title = node.xpath("./a[@class=\"treeReply\"]//text()").get();
                }
                // Article-content request. Author/title/releaseTime are only
                // visible on the list page, so stash them as request tags.
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        // Unique key and download URL for the article body.
                        .recordKey(contentUrl)
                        .httpUrl(contentUrl)
                        .releaseTime(releaseTime)
                        // Marks the resulting data as an article.
                        .resultLabelTag(CrawlerDataType.article)
                        .copyBizTags()
                        .build();
                itemRecord.tagsCreator().requestTags().getCategoryTag().addKVTag("author", author);
                itemRecord.tagsCreator().requestTags().getCategoryTag().addKVTag("title", title);
                itemRecord.tagsCreator().requestTags().getCategoryTag().addKVTag("releaseTime", releaseTime);
                parsedLinks.add(itemRecord);
                // Interaction (views / replies / likes) request for the same post.
                CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTime)
                        .copyBizTags()
                        .resultLabelTag(interaction)
                        .build();
                parsedLinks.add(interactionRecord);
            }
        } else if (requestUrl.matches(DETAILS)
                && crawlerRequestRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            // Comment nodes on the post detail page.
            List<Selectable> commentNodes = httpPage.getHtml().xpath("/html/body/div[2]/div[3]/div[3]/ul/li/ul/li").nodes();
            for (Selectable commentNode : commentNodes) {
                CrawlerRequestRecord commentRecord = this.analysisComment(crawlerRequestRecord, commentNode, httpPage);
                // analysisComment returns null on unparseable timestamps — don't enqueue nulls.
                if (commentRecord != null) {
                    parsedLinks.add(commentRecord);
                }
            }
        }
        return parsedLinks;
    }

    /**
     * Dispatches the page to the wash routine(s) matching the record's
     * declared data type(s); a record may carry more than one type.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> results = new ArrayList<>();
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(article)) {
            results.add(washArticle(crawlerRecord, page));
        }
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            results.add(washInteraction(crawlerRecord, page));
        }
        if (crawlerRecord.tagsCreator().resultTags().hasDataType(comment)) {
            results.add(washCommentaction(crawlerRecord, page));
        }
        return results;
    }
    /**
     * Builds the comment CrawlerData from the tags stashed on the record by
     * analysisComment (author, comment text, parent URL).
     *
     * @param crawlerRecord the comment request record carrying the tags
     * @param page          the downloaded page
     * @return the comment data
     */
    private CrawlerData washCommentaction(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        String author = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getKVTagStrVal("author");
        String comments = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getKVTagStrVal("comments");
        String itemUrl = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getKVTagStrVal("parentUrl");
        // Parent article key: file-name portion of the parent URL, minus extension.
        String commentKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];
        // NOTE(review): a timestamp-based dataId means re-crawling the same
        // comment produces a new id (no dedup) — confirm this is intentional.
        long dataId = System.currentTimeMillis();
        return CrawlerData.builder()
                // Inherits record/page defaults (URL, schedule/collect times, ...).
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), comment.enumVal(), dataId))
                // Comments hang off the parent article's id.
                .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), article.enumVal(), commentKey))
                .resultLabelTag(comment)
                .url(itemUrl)
                .releaseTime(crawlerRecord.getReleaseTime())
                .addContentKV(AICCommonField.Field_Content, comments)
                .addContentKV(AICCommonField.Field_Author, author)
                .build();
    }

    /**
     * Washes an article main-content page into a CrawlerData. The body text
     * is read from the page; author/title/releaseTime were stashed on the
     * record when the list page was parsed (see parseLinks).
     *
     * @return the article data, or {@code null} when washing failed
     */
    private CrawlerData washArticle(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        try {
            // Primary content selector; fall back to all body text when absent.
            List<String> texts = page.getHtml().xpath("/html/body//p/font/text()").all();
            if (texts.isEmpty()) {
                texts = page.getHtml().xpath("/html/body//text()").all();
            }
            StringBuilder contents = new StringBuilder();
            for (String text : texts) {
                contents.append(text).append(" ");
            }
            // Rebuild the canonical detail URL from the content_sync URL's postId.
            // NOTE(review): substring(49) assumes a fixed-length request URL — confirm.
            String itemUrl = page.getRequest().getUrl();
            String postId = itemUrl.substring(49);
            itemUrl = URL + postId + HTML;
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];
            // Author / title / release time captured on the list page.
            String author = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getKVTagStrVal("author");
            String title = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getKVTagStrVal("title");
            Long releaseTime = Long.valueOf(crawlerRecord.tagsCreator().requestTags().getCategoryTag().getKVTagStrVal("releaseTime"));
            return CrawlerData.builder()
                    // Inherits record/page defaults (schedule/collect times, ...).
                    .data(crawlerRecord, page)
                    // Unique id: domain-article-<postId>.
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), CrawlerDataType.article.enumVal(), articleKey))
                    .url(itemUrl)
                    // Required: marks what kind of data this is.
                    .resultLabelTag(CrawlerDataType.article)
                    // Required: timestamps the data.
                    .releaseTime(releaseTime)
                    .addContentKV(AICCommonField.Field_Content, contents.toString())
                    .addContentKV(AICCommonField.Field_Title, title)
                    .addContentKV(AICCommonField.Field_Author, author)
                    .build();
        } catch (Exception e) {
            // Best-effort: log and skip this page rather than aborting the batch.
            log.error(e.getMessage(), e);
            return null;
        }
    }
    /**
     * Washes view / reply / like counts from a post detail page.
     *
     * @return the interaction data, or {@code null} when washing failed
     */
    private CrawlerData washInteraction(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        try {
            String views = page.getHtml().xpath("//./p[@class=\"replayInfo\"]/span/span[1]/text()").get();
            String comments = page.getHtml().xpath("//./p[@class=\"replayInfo\"]/span/span[2]/text()").get();
            String time = page.getHtml().xpath("//./p[@class=\"replayInfo\"]/span/text()[3]").get();
            String likes = page.getHtml().xpath("//./p[@class=\"replayInfo\"]/span[2]/text()").get();
            // A bare "赞" label means zero likes; otherwise the count is wrapped
            // in a 2-char prefix and 1-char suffix — TODO confirm the markup.
            if ("赞".equals(likes)) {
                likes = "0";
            } else {
                likes = likes.substring(2, likes.length() - 1);
            }
            // The timestamp sits at chars 3..18 of the span text: "yyyy-MM-dd HH:mm".
            time = time.substring(3, 19);
            long releaseTime = DateUtils.parseDate(time, "yyyy-MM-dd HH:mm").getTime();
            String itemUrl = page.getRequest().getUrl();
            String interactionKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1).split("\\.")[0];
            return CrawlerData.builder()
                    // Inherits record/page defaults so other stages can reuse them.
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), interaction.enumVal(), interactionKey))
                    .url(itemUrl)
                    // Interaction hangs off the article with the same key.
                    .parentId(StringUtils.joinWith("-", crawlerRecord.getDomain(), article.enumVal(), interactionKey))
                    .resultLabelTag(interaction)
                    .releaseTime(releaseTime)
                    .addContentKV(AICCommonField.Field_I_Views, views)
                    .addContentKV(AICCommonField.Field_I_Comments, comments)
                    .addContentKV(AICCommonField.Field_I_Likes, likes)
                    .build();
        } catch (Exception e) {
            // Best-effort: log and skip this page rather than aborting the batch.
            log.error(e.getMessage(), e);
            return null;
        }
    }

    /**
     * Builds a request record for a single comment node, carrying the comment
     * author/text/parent URL as tags and inheriting the dedup filter of the
     * parent record.
     *
     * @return the comment request, or {@code null} when the timestamp is unparseable
     */
    private CrawlerRequestRecord analysisComment(CrawlerRequestRecord crawlerRequestRecord, Selectable node2, HttpPage httpPage) {
        String author = node2.xpath("./p/a[1]/text()").get();
        String comments = node2.xpath("./p/a[2]/text()").get();
        String commentUrl = node2.xpath("./p/a[@class=\"treeReply\"]/@href").get();
        String time = node2.xpath("./p/span[1]/text()").get();
        String parentUrl = httpPage.getRequest().getUrl();
        try {
            // The timestamp sits at chars 1..16 of the span: "yyyy-MM-dd HH:mm".
            time = time.substring(1, 17);
            long releaseTime = DateUtils.parseDate(time, "yyyy-MM-dd HH:mm").getTime();
            // Dedup filter for comments, serialized into the biz tags upstream.
            KVTag filterInfo = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("comment_record_filter_info");
            CrawlerRecord filterInfoRecord = JSON.parseObject((String) filterInfo.getVal(), CrawlerRecord.class);
            CrawlerRequestRecord commentRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .releaseTime(releaseTime)
                    .recordKey(commentUrl)
                    .httpUrl(commentUrl)
                    .notFilterRecord()
                    .resultLabelTag(comment)
                    .build();
            // Stash values only obtainable here for washCommentaction.
            commentRecord.tagsCreator().requestTags().getCategoryTag().addKVTag("parentUrl", parentUrl);
            commentRecord.tagsCreator().requestTags().getCategoryTag().addKVTag("author", author);
            commentRecord.tagsCreator().requestTags().getCategoryTag().addKVTag("comments", comments);
            // Carry the parent's filter ("yesterday's newest replies") over.
            commentRecord.setFilter(filterInfoRecord.getFilter());
            commentRecord.setFilterInfos(filterInfoRecord.getFilterInfos());
            return commentRecord;
        } catch (ParseException e) {
            // Log (not printStackTrace) so failures surface in the crawler logs.
            log.error("failed to parse comment time '{}'", time, e);
            return null;
        }
    }
    /**
     * Normalizes a raw list-page timestamp fragment.
     *
     * The timestamp occupies the 20 characters ending 5 characters before the
     * end of the raw string. A "yyyy-MM-dd HH:mm" value is rewritten as
     * "MM月dd日 HH:mm" (the year is dropped; initReleaseTime re-adds it);
     * anything else is returned trimmed as-is.
     *
     * @param pubTime raw text from the list page (must be at least 25 chars)
     * @return the normalized timestamp fragment
     */
    private String pubTime(String pubTime) {
        // The original reversed the string twice to address it from the end;
        // this takes the same window directly: chars [len-25, len-5).
        int len = pubTime.length();
        String window = pubTime.substring(len - 25, len - 5).trim();
        if (window.matches("\\d{4}\\-\\d{1,2}\\-\\d{1,2} \\d{1,2}:\\d{1,2}")) {
            // Drop "yyyy-" (the year is always 4 digits per the regex) and
            // rewrite "MM-dd HH:mm" as "MM月dd日 HH:mm". replaceFirst also
            // handles 1-digit months/days, which the old index-based
            // delete/insert logic silently corrupted.
            return window.substring(5).replaceFirst("^(\\d{1,2})-(\\d{1,2})", "$1月$2日");
        }
        return window;
    }
    /**
     * Resolves a list-row node's publish time to epoch millis.
     *
     * Rows whose type prefix is "[-]" keep the timestamp in the second span;
     * all others keep it in the first. A fragment without an explicit year is
     * assumed to belong to the current year.
     *
     * @param type the row's type prefix text
     * @param node the list-row node
     * @return epoch millis, or 0 when the timestamp cannot be parsed
     */
    private Long initReleaseTime(String type, Selectable node) {
        // "[-]" rows have an extra leading span, shifting the timestamp span.
        String spanXpath = type.equals("[-]") ? "./span[2]/text()[2]" : "./span[1]/text()[2]";
        String pubTime = this.pubTime(node.xpath(spanXpath).get());
        if (!pubTime.contains("年")) {
            // No year in the fragment: prepend the current year.
            pubTime = Calendar.getInstance().get(Calendar.YEAR) + "年" + pubTime;
        }
        try {
            return DateUtils.parseDate(pubTime, "yyyy年MM月dd日 HH:mm").getTime();
        } catch (ParseException e) {
            // Keep the record with a zero timestamp rather than dropping it;
            // log (not printStackTrace) so the failure is visible.
            log.error("failed to parse publish time '{}'", pubTime, e);
            return 0L;
        }
    }
    /**
     * Determines which data-type label this request carries, checking
     * article, then interaction, then comment; first match wins.
     *
     * @return the matching label, or {@code null} when none is present
     */
    private String label(CrawlerRequestRecord crawlerRequestRecord) {
        for (String name : new String[]{"article", "interaction", "comment"}) {
            LabelTag tag = crawlerRequestRecord.tagsCreator().resultTags().getCategoryTag().getLabelTag(name);
            if (tag != null) {
                return tag.getLabel();
            }
        }
        return null;
    }
    /**
     * Builds the request for the next list page by incrementing the page
     * counter and appending it to the list-URL prefix.
     *
     * NOTE(review): {@code next} is mutable instance state, so concurrent
     * invocations could skip or repeat pages — confirm single-threaded use.
     *
     * @return the turn-page request for the next list page
     */
    private CrawlerRequestRecord analysisNextPage(CrawlerRequestRecord crawlerRequestRecord) {
        String nextPageUrl = listUrlPrefix + "1_" + (++next) + HTML;
        // Log (not System.out) so the page progression shows up in crawler logs.
        log.debug("next list page: {}", nextPageUrl);
        return CrawlerRequestRecord.builder()
                // Marks this as a turn-page request, inheriting scheduling config.
                .turnPageRequest(crawlerRequestRecord)
                .recordKey(nextPageUrl)
                .httpUrl(nextPageUrl)
                // List pages use the crawl time as their release time.
                .releaseTime(System.currentTimeMillis())
                // Copy the previous record's business tags.
                .copyBizTags()
                .build();
    }
    /**
     * Checks whether the 2-digit day embedded at chars 3-4 of the given date
     * fragment lies exactly 8 days before today's day-of-month.
     *
     * NOTE(review): the parameter shadows the static {@code temporary} field,
     * and the raw day subtraction ignores month boundaries; the constant 8
     * looks like a tuned window — confirm intent. Method is currently unused
     * in this file.
     *
     * @param temporary date fragment with a 2-digit day at index 3 (may be null)
     * @return true when today's day-of-month minus the embedded day equals 8
     */
    private boolean timeJudge(String temporary) {
        if (temporary == null) {
            return false;
        }
        int day = Integer.parseInt(temporary.substring(3, 5));
        // Day-of-month read directly; avoids building a SimpleDateFormat per call.
        int today = Calendar.getInstance().get(Calendar.DAY_OF_MONTH);
        return today - day == 8;
    }
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // Intentionally empty: this script needs no post-execution cleanup.
    }

}
