import climb.NickNameInExcel;
import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

/**
 * @author <a href="mailto:hellohesir@gmail.com">Mr_He</a>
 * 2019/9/6 15:23
 */
/**
 * Entry point that configures and launches a crawler4j crawl using
 * {@link climb.NickNameInExcel} as the page-visiting crawler class.
 *
 * <p>The crawl is bounded to depth 10, seeded from {@code http://www.nibaku.com},
 * and persists its state under {@code /data/climb/} so an interrupted crawl
 * can be resumed.</p>
 *
 * @author <a href="mailto:hellohesir@gmail.com">Mr_He</a>
 * 2019/9/6 15:23
 */
public class Application {

    public static void main(String[] args) throws Exception {
        // Number of concurrent crawler threads.
        int numberOfCrawlers = 7;

        // Crawler configuration.
        CrawlConfig config = new CrawlConfig();
        // Maximum crawl depth from the seed URL.
        config.setMaxDepthOfCrawling(10);
        // Intermediate crawl state is stored here, allowing the crawl to be resumed.
        config.setCrawlStorageFolder("/data/climb/");

        /*
         * Instantiate the controller for this crawl.
         */
        PageFetcher pageFetcher = new PageFetcher(config);
        RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
        RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
        CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);

        // Seed URL where crawling begins.
        controller.addSeed("http://www.nibaku.com");

        // Start the crawl; this call blocks until the crawl is finished.
        controller.start(NickNameInExcel.class, numberOfCrawlers);

    }
}
