package com.nlp.visualization.crawler.launcher;

import com.nlp.visualization.common.CONSTANTS;
import com.nlp.visualization.crawler.pipeline.SegmentTask;
import com.nlp.visualization.crawler.pipeline.SentenceTask;
import com.nlp.visualization.crawler.processor.CommonProcessor;
import com.nlp.visualization.pojo.NLP.seg.SegmentEntity;
import com.nlp.visualization.pojo.NLP.sen.SentenceEntity;
import org.springframework.stereotype.Service;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.pipeline.FilePipeline;
import us.codecraft.webmagic.pipeline.Pipeline;

/**
 * Entry points that launch a WebMagic crawl over one or more URLs and feed the
 * fetched pages into an NLP pipeline (word segmentation or sentence analysis).
 *
 * @author LXM
 * @since 2018-03-13
 */
@Service
@Service
public class StartCommon {

    /** Number of worker threads each spider run uses to fetch pages. */
    private static final int THREAD_COUNT = 5;

    /**
     * Crawls the given URLs and runs word segmentation on the fetched content.
     * Blocks until the crawl completes.
     *
     * @param pipeline pipeline that accumulates segmentation results while crawling
     * @param URLs     one or more seed URLs to crawl
     * @return the segmentation result collected by {@code pipeline}
     */
    public SegmentEntity startCrawlerAndSegment(SegmentTask pipeline, String... URLs) {
        runSpider(pipeline, URLs);
        return pipeline.getSegmentEntity();
    }

    /**
     * Crawls the given URLs and runs sentence (syntactic) analysis on the fetched
     * content. Blocks until the crawl completes.
     *
     * @param pipeline pipeline that accumulates sentence-analysis results while crawling
     * @param URLs     one or more seed URLs to crawl
     * @return the analysis result collected by {@code pipeline}
     */
    public SentenceEntity startCrawlerAndSentence(SentenceTask pipeline, String... URLs) {
        runSpider(pipeline, URLs);
        return pipeline.getEntity();
    }

    /**
     * Builds and synchronously runs a spider over {@code URLs}, pushing every page
     * through {@code pipeline}. Shared by all public entry points so the spider
     * configuration (processor, thread count) lives in one place.
     *
     * @param pipeline destination for crawled page results
     * @param URLs     seed URLs to crawl
     */
    private static void runSpider(Pipeline pipeline, String... URLs) {
        Spider.create(new CommonProcessor())
                .addUrl(URLs)
                .addPipeline(pipeline)
                .thread(THREAD_COUNT)
                // run() blocks until the crawl finishes
                .run();
    }

    /** Ad-hoc smoke test: crawl a single news page and dump results via FilePipeline. */
    public static void main(String[] args) {
        runSpider(new FilePipeline(),
                "http://sports.sina.com.cn/china/afccl/2018-03-13/doc-ifyscsmv2170853.shtml");
    }
}
