package org.eduapp.trains.crawler;

import java.util.List;

import edu.uci.ics.crawler4j.crawler.CrawlController;

/**
 * Configures and runs a crawler4j crawl that collects train data.
 *
 * <p>The controller is seeded with a single start URL (defaulting to the
 * tielu.org train list) and delegates page processing to
 * {@code TrainDataCrawler}. Crawl storage location and thread count are
 * configurable via setters; they default to the original hard-coded values.
 */
public class TrainDataCrawlerController {

	/** Default first URL fetched when no seed is supplied. */
	private static final String DEFAULT_URL_SEED =
			"http://www.tielu.org/TrainList/TrainList-1.html";

	/** Default folder where crawler4j stores intermediate crawl data. */
	private static final String DEFAULT_ROOT_FOLDER = "D:\\";

	/** Default number of concurrent crawler threads. */
	private static final int DEFAULT_NUMBER_OF_CRAWLERS = 1;

	/** First URL fetched; the crawler follows links found from here. */
	private String urlSeed;

	/** Folder where crawler4j keeps its intermediate crawl data. */
	private String rootFolder = DEFAULT_ROOT_FOLDER;

	/** Number of concurrent crawler threads started by {@link #startCrawl()}. */
	private int numberOfCrawlers = DEFAULT_NUMBER_OF_CRAWLERS;

	/** Creates a controller seeded with the default train-list URL. */
	public TrainDataCrawlerController () {
		this.urlSeed = DEFAULT_URL_SEED;
	}

	/**
	 * Creates a controller seeded with the given URL.
	 *
	 * @param urlSeed the first URL to fetch
	 */
	public TrainDataCrawlerController (String urlSeed) {
		this.urlSeed = urlSeed;
	}

	/**
	 * Runs the crawl and blocks until it finishes.
	 *
	 * <p>Note: if you want the crawl to be resumable after an
	 * interruption/crash, either set {@code crawler.enable_resume} to true in
	 * crawler4j.properties or use the two-argument CrawlController
	 * constructor; to then start a fresh crawl you must delete the contents
	 * of the root folder manually. Several other parameters can also be
	 * configured through crawler4j.properties.
	 *
	 * @return the per-crawler local data gathered by {@code TrainDataCrawler}
	 * @throws Exception if crawler4j fails to initialize or crawl
	 */
	public List<Object> startCrawl () throws Exception {

		// rootFolder is where crawler4j stores intermediate crawl data.
		CrawlController controller = new CrawlController(rootFolder);

		/*
		 * Each crawl needs at least one seed URL: the first page fetched,
		 * from which the crawler starts following links.
		 */
		controller.addSeed(urlSeed);

		/*
		 * Be polite: wait at least 500 ms between requests, i.e. at most
		 * 2 requests per second.
		 */
		controller.setPolitenessDelay(500);

		// Maximum crawl depth; -1 means unlimited depth (the default).
		controller.setMaximumCrawlDepth(-1);

		// Maximum number of pages to fetch; -1 means unlimited pages.
		controller.setMaximumPagesToFetch(-1);

		/*
		 * If a proxy is needed, configure it here, e.g.:
		 *   controller.setProxy("proxyserver.example.com", 8080);
		 *   controller.setProxy("proxyserver.example.com", 8080, user, pass);
		 */

		/*
		 * Start the crawl. This is a blocking call: control returns only
		 * when crawling has finished.
		 */
		controller.start(TrainDataCrawler.class, numberOfCrawlers);

		return controller.getCrawlersLocalData();
	}

	public void setUrlSeed(String urlSeed) {
		this.urlSeed = urlSeed;
	}

	public String getUrlSeed() {
		return urlSeed;
	}

	/**
	 * Overrides the folder used for intermediate crawl data
	 * (default {@code "D:\\"}).
	 *
	 * @param rootFolder path to a writable folder
	 */
	public void setRootFolder(String rootFolder) {
		this.rootFolder = rootFolder;
	}

	public String getRootFolder() {
		return rootFolder;
	}

	/**
	 * Overrides the number of concurrent crawler threads (default 1).
	 *
	 * @param numberOfCrawlers must be at least 1
	 */
	public void setNumberOfCrawlers(int numberOfCrawlers) {
		this.numberOfCrawlers = numberOfCrawlers;
	}

	public int getNumberOfCrawlers() {
		return numberOfCrawlers;
	}

}
