package com.nlp.visualization.crawler.processor;

import com.nlp.visualization.common.CONSTANTS;
import com.nlp.visualization.crawler.pipeline.BufferPipeline;
import com.nlp.visualization.crawler.pipeline.JsonBuffer;
import com.nlp.visualization.utils.TimeHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.scheduler.QueueScheduler;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;

import static com.nlp.visualization.common.CONSTANTS.CRAWLER_THREAD_COUNT;

/**
 * @author LXM
 * @Title: ProcessorCentre
 * @Description: 利用反射调用处理不同网站的任务类，来爬取不同站点
 * @date 2018/3/3下午9:33
 */
public class ProcessorCentre implements PageProcessor {

    // Part 1: site-wide crawl configuration — retry count and retry wait interval.
    private final Site site = Site.me()
            .setRetryTimes(CONSTANTS.CRAWLER_RETRY_TIME)
            .setSleepTime(CONSTANTS.CRAWLER_RETRY_WAITING_TIME);

    private static final Logger logger = LoggerFactory.getLogger(ProcessorCentre.class);

    private Spider spider;

    // Target crawl date, formatted "yyyy-MM-dd".
    // NOTE(review): static and mutated by each crawel(...) call, so two concurrent
    // crawl runs with different dates would interfere — confirm single-threaded use.
    private static String date;


    /**
     * Core extraction callback (Part 2): identifies which site this page came
     * from, records it under the "resource" field, then reflectively loads and
     * invokes that site's {@link Processor} implementation to extract the page.
     *
     * @param page the page fetched by the spider
     */
    @Override
    public void process(Page page) {
        String source = getSourceFromPage(page);
        page.putField("resource", source);

        if (source.isEmpty()) {
            // No processor mapping exists for an unrecognised host; bail out
            // instead of letting Class.forName(null) throw an NPE.
            logger.warn("未识别的站点，跳过页面：{}", page.getUrl());
            return;
        }

        // Reflectively instantiate the per-site task class.
        logger.info("开始爬取【{}】站点的新闻链接。", source);
        try {
            Class<?> processorClass = Class.forName(ProcessorUtil.getClass(source.toLowerCase()));
            Processor processor = (Processor) processorClass.newInstance();
            processor.processor(page, date);
        } catch (ReflectiveOperationException e) {
            // Covers ClassNotFoundException, InstantiationException, IllegalAccessException.
            logger.error("实例化站点处理类失败：{}", source, e);
        }
    }

    @Override
    public Site getSite() {
        return site;
    }

    /**
     * Derives the site key (e.g. "sina", "163") from the page URL.
     *
     * @param page the fetched page whose URL is inspected
     * @return the site key, or the empty string for unrecognised hosts
     */
    private String getSourceFromPage(Page page) {
        return page.getUrl().toString().contains("sina") ? "sina" : "";
    }

    /**
     * Crawls each listed source for a caller-specified date.
     *
     * @param sourceList site keys to crawl (e.g. "sina")
     * @param date       target date, formatted "yyyy-MM-dd"
     * @throws IllegalStateException  if the spider cannot be run
     * @throws ClassNotFoundException if a site's processor class is missing
     * @throws IllegalAccessException if a processor class is not accessible
     * @throws InstantiationException if a processor class cannot be instantiated
     */
    public void crawel(List<String> sourceList, String date) throws IllegalStateException, ClassNotFoundException, IllegalAccessException, InstantiationException {
        // Static field — assign through the class, not an instance reference.
        ProcessorCentre.date = date;
        logger.info("指定爬取日期为：{}", date);
        for (String source : sourceList) {
            crawlSource(source);
        }
    }

    /**
     * Crawls each listed source for the current day's news.
     *
     * @param sourceList site keys to crawl (e.g. "sina")
     * @throws IllegalStateException  if the spider cannot be run
     * @throws ClassNotFoundException if a site's processor class is missing
     * @throws IllegalAccessException if a processor class is not accessible
     * @throws InstantiationException if a processor class cannot be instantiated
     */
    public void crawel(List<String> sourceList) throws IllegalStateException, ClassNotFoundException, IllegalAccessException, InstantiationException {
        // Default to today's date and reuse the dated overload.
        crawel(sourceList, new SimpleDateFormat("yyyy-MM-dd").format(new Date()));
    }

    /**
     * Runs one complete crawl cycle for a single source site: builds a fresh
     * spider with its own scheduler, lets the site-specific processor seed the
     * start URLs, runs the spider to completion, then flushes the buffered
     * results for the current {@link #date}.
     *
     * @param source site key to crawl
     * @throws ClassNotFoundException if the site's processor class is missing
     * @throws IllegalAccessException if the processor class is not accessible
     * @throws InstantiationException if the processor class cannot be instantiated
     */
    private void crawlSource(String source) throws ClassNotFoundException, IllegalAccessException, InstantiationException {
        logger.info("初始化爬虫！");
        // A fresh scheduler per run so URLs seen on previous days are re-crawled.
        spider = Spider.create(new ProcessorCentre())
                .addPipeline(new BufferPipeline())
                .setScheduler(new QueueScheduler())
                .thread(CRAWLER_THREAD_COUNT);
        // Reflectively load the per-site processor and let it configure the spider.
        Class<?> processorClass = Class.forName(ProcessorUtil.getClass(source));
        Processor processor = (Processor) processorClass.newInstance();
        processor.init(spider);
        logger.info("载入网站:【{}】配置。", source);
        spider.run();
        spider.close();
        // Persist the day's buffered results for this source.
        JsonBuffer.getInstance().saveTodayNews(source, date);
    }


}
