package com.morpx.sim.apps;

import com.morpx.sim.crawl.VipCrawler;

import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;


/**
 * A crawler entry point that crawls a web site to fetch new images and update
 * object status.
 *
 * <p>NOTE(review): this class extends {@link Thread} but never overrides
 * {@code run()} and is only launched via {@code main}; the superclass is kept
 * for backward compatibility with any callers that reference it as a Thread.
 *
 * @author tianli
 */
public class CrawlerThread extends Thread{
  /**
   * Configures and starts the crawl controller.
   *
   * <p>Expected arguments:
   * <ol>
   *   <li>storageFolder — folder for storing downloaded images</li>
   *   <li>numberOfCrawlers — number of concurrent crawler threads</li>
   *   <li>crawl address — seed URL/domain to crawl</li>
   *   <li>[category] — optional category filter</li>
   * </ol>
   *
   * @param args command-line arguments as described above
   * @throws Exception if crawler initialization or crawling fails
   */
  public static void main(String[] args) throws Exception {
    if (args.length < 3) {
            System.out.println("Needed parameters: ");
            System.out.println("\t storageFolder (a folder for storing downloaded images)");
            System.out.println("\t numberOfCrawlers (number of concurrent threads)");
            System.out.println("\t crawl address");
            System.out.println("\t [categories]");
            return;
    }

    // Optional category filter; null means "all categories".
    // Use >= so a category supplied alongside any extra args is not silently dropped.
    String category = null;
    if (args.length >= 4) {
      category = args[3];
    }

    String storageFolder = args[0];
    int numberOfCrawlers = Integer.parseInt(args[1]);
    String crawlDomain = args[2];

    CrawlConfig config = new CrawlConfig();

    // Use a separate "_temp" folder for crawl bookkeeping so it does not
    // mix with the downloaded images in storageFolder.
    config.setCrawlStorageFolder(storageFolder + "_temp");
    config.setMaxDepthOfCrawling(VipCrawler.MAX_CRAWLING_DEPTH);

    /*
     * Since images are binary content, we need to set this parameter to
     * true to make sure they are included in the crawl.
     */
    config.setIncludeBinaryContentInCrawling(true);
    PageFetcher pageFetcher = new PageFetcher(config);

    // Honor robots.txt of the crawled site.
    RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
    RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
    CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);

    controller.addSeed(crawlDomain);

    // Static configuration shared by all VipCrawler instances spawned below.
    VipCrawler.configure(storageFolder, category);

    // Blocks until the crawl finishes.
    controller.start(VipCrawler.class, numberOfCrawlers);
  }
}
