package crawler;

import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Locale;

import org.apache.commons.io.FileUtils;

import crawler.HTMLLinkExtrator;
import crawler.HTMLLinkExtrator.HtmlLink;
import crawler.HttpNode;
import crawler.WebsiteReader;


/**
 * Breadth-first web crawler. Starting from a set of seed URLs it fetches
 * pages up to a configured depth, enqueues out-links, and saves any page
 * whose plain text contains one of the configured keywords.
 */
public class Crawler {

	// Maximum link depth to follow from the seeds.
	int maxDepth;
	// Starting URLs of the crawl.
	ArrayList<String> seeds;
	// Keywords marking a page as interesting; stored upper-cased (Locale.ROOT).
	ArrayList<String> words;
	// Directory under which crawl results are written.
	String outputPath;
	// Monotonically increasing id used to name saved result files.
	int linksCounter = 0;

	// FIFO frontier of links still to process (BFS order).
	ArrayList<HttpNode> mainQueue;

	// URLs already visited; used as a "seen" set to avoid re-crawling.
	HashMap<String, HttpNode> crawlingContent;

	/**
	 * Returns true if the page's plain text contains any configured keyword
	 * (case-insensitive).
	 *
	 * @param pagePlaneContent plain-text content of the page
	 */
	private boolean isInterestingPage(String pagePlaneContent)
	{
		// Locale.ROOT keeps the comparison locale-independent; the keywords
		// were upper-cased the same way in the constructor.
		String upper = pagePlaneContent.toUpperCase(Locale.ROOT);
		for (String word : words)
		{
			// BUG FIX: was indexOf(word) > 0, which missed a match at index 0.
			if (upper.indexOf(word) >= 0)
			{
				return true;
			}
		}
		return false;
	}

	/**
	 * Writes the node's plain text and raw HTML into a timestamped directory
	 * under the configured output path.
	 *
	 * @param n fetched node whose content should be persisted
	 */
	private void saveCrawlingResult(HttpNode n)
	{
		File dirFile = new File(this.outputPath, DateUtils.now());
		dirFile.mkdir();

		try
		{
			// BUG FIX: use one id per page so N.txt and N.html describe the
			// same page (the counter previously advanced between the writes).
			int id = linksCounter++;
			FileUtils.writeStringToFile(new File(dirFile, id + ".txt"), n.planeText);
			FileUtils.writeStringToFile(new File(dirFile, id + ".html"), n.content);
		}
		catch (Exception e)
		{
			// Best-effort save: report the failure instead of swallowing it.
			System.err.println("Failed to save crawl result for " + n.link + ": " + e);
		}
	}

	/**
	 * Processes one frontier node: marks it visited, fetches the page,
	 * enqueues out-links (when below the depth limit) and saves the page
	 * when it matches a keyword.
	 *
	 * @param n node taken from the frontier
	 */
	private void processHttpNode(HttpNode n)
	{
		// Skip too-deep or already-visited links before doing (or logging)
		// any work; the original logged "Processing" for skipped nodes too.
		if (n.depth > maxDepth || crawlingContent.containsKey(n.link))
		{
			return;
		}

		System.out.print("(" + mainQueue.size() + ")Processing link: " + n.link + "...");

		// Mark visited immediately so the same URL is never fetched twice,
		// even if it was queued multiple times.
		crawlingContent.put(n.link, n);

		String pageContent = WebsiteReader.Read(n.link);
		if (pageContent.equals(""))
		{
			System.out.println("empty page, skipped");
			return;
		}
		n.setContent(pageContent);

		// Count of out-links discovered on this page (0 when at max depth).
		int linksFound = 0;
		if (n.depth < maxDepth)
		{
			// Extract out-links and enqueue them one level deeper.
			HTMLLinkExtrator linkExtractor = new HTMLLinkExtrator();
			ArrayList<HtmlLink> links = linkExtractor.grabHTMLLinks(pageContent);
			for (HtmlLink link : links)
			{
				mainQueue.add(new HttpNode(link.link, n.depth + 1));
			}
			linksFound = links.size();
		}

		// Save pages containing at least one of the configured keywords.
		if (isInterestingPage(n.planeText))
		{
			saveCrawlingResult(n);
		}

		// BUG FIX: the original referenced `links` here, a variable scoped to
		// the if-block above, which did not compile.
		System.out.println("Done! found " + linksFound + " links");
	}

	/**
	 * Runs a breadth-first crawl from the seed URLs until the frontier is
	 * empty.
	 */
	private void crawl()
	{
		mainQueue = new ArrayList<HttpNode>();
		crawlingContent = new HashMap<String, HttpNode>();

		// Seed the frontier at depth 0.
		for (String seed : seeds)
		{
			mainQueue.add(new HttpNode(seed, 0));
		}

		// NOTE: remove(0) on an ArrayList is O(n); acceptable for small
		// crawls, switch the frontier to a Deque if it grows large.
		while (!mainQueue.isEmpty())
		{
			processHttpNode(mainQueue.remove(0));
		}
	}

	/**
	 * Configures the crawler from the given strategy and immediately starts
	 * the crawl (the constructor kicks off the whole run, as before).
	 *
	 * @param strategy crawl parameters: depth, seeds, keywords, output path
	 */
	public Crawler(CrawlerStrategy strategy)
	{
		this.maxDepth = strategy.maxDepth;
		this.seeds = strategy.seeds;
		this.words = new ArrayList<String>();
		for (String word : strategy.words)
		{
			// Upper-case with Locale.ROOT to match isInterestingPage.
			this.words.add(word.toUpperCase(Locale.ROOT));
		}
		this.outputPath = strategy.outputPath;

		crawl();
	}

	/**
	 * Entry point. Usage: {@code Crawler <strategy file>}.
	 *
	 * @param args a single argument: path of the strategy file
	 */
	public static void main(String[] args) throws Exception
	{
		if (args.length != 1)
		{
			System.out.println("Usage: Crawler <strategy file>");
			System.exit(-1);
		}

		CrawlerStrategy strategy = new CrawlerStrategy(args[0]);

		new Crawler(strategy);
	}
}
