package com.barkerton.crawler;

import java.util.ArrayList;
import java.net.MalformedURLException;
import java.net.URL;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.barkerton.crawler.parser.PageAnalyzer;
import com.barkerton.crawler.queuing.CrawlerQueue;
import com.barkerton.crawler.queuing.PageQueue;
import com.barkerton.crawler.util.PropertyManager;
import com.barkerton.crawler.util.Util;

/**
 * Main class that initiates all associated threads and starts
 * up the crawler aka jrawler.
 * 
 * @author c.barker
 * 
 */
/**
 * Main class that initiates all associated threads and starts
 * up the crawler aka jrawler.
 * 
 * @author c.barker
 * 
 */
public class Main {

	private static final Log log = LogFactory.getLog(Main.class);

	// Single seed URL supplied via -u (mutually exclusive with seedFilePath).
	private URL urlSeed;
	// Path to a newline-delimited seed file supplied via -f.
	private String seedFilePath;
	// URLs parsed out of the seed file; null until -f has been processed.
	private URL[] seedUrls;
	// Crawl depth supplied via -d.
	private int crawlDepth;
	// True when -s was given: crawler must not spider outside the seed site.
	private boolean siteOnlyCrawl;

	public Main() {
		urlSeed = null;
		seedFilePath = null;
		crawlDepth = 0;
		siteOnlyCrawl = false;
	}

	/**
	 * Parses command arguments and validates their options.
	 * Method will exit program if arguments are invalid.
	 * -u individual URL to crawl
	 * -f path to seed file for loading batch of URLs to crawl
	 * -d crawl depth number
	 * -s denotes site only crawl
	 *
	 * @param args raw command-line arguments from {@code main}
	 */
	private void parseArgs(String[] args) {

		if (args.length < 2) {
			exitWithUsage(null);
		}

		for (int i = 0; i < args.length; i++) {
			String arg = args[i].trim();

			if (arg.equalsIgnoreCase("-u")) {
				if (i + 1 >= args.length) {
					exitWithUsage(null);
				}
				try {
					this.urlSeed = new URL(args[++i]);
				} catch (MalformedURLException mue) {
					exitWithUsage(mue.getMessage());
				}
			}
			else if (arg.equalsIgnoreCase("-f")) {
				if (i + 1 >= args.length) {
					exitWithUsage(null);
				}
				this.seedFilePath = args[++i];
				this.seedUrls = parseSeedFile(seedFilePath);
				if (this.seedUrls == null) {
					exitWithUsage("Seed file must contain valid URLs");
				}
			}
			else if (arg.equalsIgnoreCase("-d")) {
				if (i + 1 >= args.length) {
					exitWithUsage(null);
				}
				try {
					// Integer.parseInt replaces the deprecated new Integer(String)
					// and lets us report a bad value instead of crashing with an
					// uncaught NumberFormatException.
					this.crawlDepth = Integer.parseInt(args[++i]);
				} catch (NumberFormatException nfe) {
					exitWithUsage("Crawl depth must be an integer: " + args[i]);
				}
			}
			else if (arg.equalsIgnoreCase("-s")) {
				this.siteOnlyCrawl = true;
			}
			else {
				exitWithUsage("Invalid argument: " + args[i]);
			}
		}

		if (this.urlSeed != null && this.seedFilePath != null) {
			exitWithUsage("Please specify a URL or file seed, but not both");
		}

		if (this.seedFilePath != null && this.siteOnlyCrawl) {
			exitWithUsage("Seed file cannot be provided with a site only crawl");
		}
	}

	/**
	 * Prints an optional error message followed by the usage text to
	 * stderr, then terminates the JVM with a non-zero exit code.
	 *
	 * @param message extra detail to print first, or null for usage only
	 */
	private void exitWithUsage(String message) {
		if (message != null) {
			System.err.println(message);
		}
		System.err.println(getUsage());
		System.exit(1);
	}

	/**
	 * Reads a newline-delimited seed file and converts each line to a URL.
	 * Blank lines are skipped; malformed URLs are reported to stderr but do
	 * not abort the parse.
	 *
	 * @param filePath absolute path to the seed file
	 * @return the parsed URLs, or null when the file could not be read or
	 *         contained no valid URL
	 */
	private URL[] parseSeedFile(String filePath) {
		// Guard against a read failure; assumes Util.readFile signals failure
		// by returning null — TODO confirm its contract.
		String content = Util.readFile(filePath);
		if (content == null) {
			return null;
		}

		ArrayList<URL> urls = new ArrayList<URL>();

		for (String line : content.split("\n")) {
			String candidate = line.trim();
			if (candidate.isEmpty()) {
				continue; // skip blank lines (e.g. trailing newline)
			}
			try {
				urls.add(new URL(candidate));
			}
			catch (MalformedURLException mue) {
				System.err.println("Malformed URL: " + candidate);
			}
		}

		if (urls.isEmpty()) {
			return null;
		}

		// toArray(new URL[0]) keeps the element type as URL[]; the previous
		// toArray().clone() made a pointless second Object[] copy.
		return urls.toArray(new URL[0]);
	}

	/**
	 * @return the multi-line usage text describing all command-line options
	 */
	public String getUsage() {
		return "Usage:\tjava Main -u [url] | -f [file] -d [int] -s\n" +
				"\t-u [url] single url to seed crawler with\n" +
				"\t-f [file] absolute path to seed file of multiple URLs\n" +
				"\t-d [int] crawl depth - number of URLs to crawl\n" +
				"\t-s denotes site only crawl will not spider outside\n\n" +
				"Ex: java Main -u http://foo.com -d 200 -s";
	}

	/**
	 * Entry point: parses arguments, wires up the queues and worker
	 * objects, seeds the crawl queue, and starts the crawler thread.
	 */
	public static void main(String[] args) throws Exception {
		log.info("Initializing Crawler...");
		
		// TODO need to read in thread count for crawler and page analyzer threads from properties
		Main main = new Main();
		main.parseArgs(args);
		
		// initialize objects
		PropertyManager pm = PropertyManager.getInstance();
		
		// consumer threads
		CrawlerQueue cQueue = CrawlerQueue.getInstance();
		PageQueue pQueue = PageQueue.getInstance();
		
		// producer threads
		Crawler crawler = new Crawler();
		PageAnalyzer pAnalyzer = new PageAnalyzer();

		// Queue up seed url(s) to be crawled.
		// BUGFIX: build a distinct Seed per URL. The original reused one
		// mutable Seed and called setUrl() between enqueues, so if the queue
		// holds references every queued entry pointed at the LAST url.
		if (main.urlSeed != null) {
			cQueue.enqueue(new Seed(main.urlSeed, main.crawlDepth, main.siteOnlyCrawl));
		}
		else {
			for (URL url : main.seedUrls) {
				cQueue.enqueue(new Seed(url, main.crawlDepth, main.siteOnlyCrawl));
			}
		}
		
		// start threads
		Thread t = new Thread(crawler);
		t.start();
		Thread.sleep(500);
		//pAnalyzer.run();
	}
}
