/*
 * TCSS 422 Web Spider Project
 * Group Money: Al McKenzie, Michael Pitts, Taylor Zielske
 */
package control.threads;

import java.io.IOException;
import java.net.URL;

import control.data.SearchHopper;

import model.DomainSearchParser;
import model.URLInputCooker;
import model.URLParser;
import model.URLSearchParser;
import model.URLStore;
import model.WordSearchParser;
import model.domain.BlockDomain;
import model.domain.Domain;
import model.domain.OpenDomain;

/**
 * This abstract class handles all the functionality that a ThreadDirector would need
 * regardless of thread distribution.
 * 
 * @author Michael Pitts
 * @version Nov 1, 2011
 */
public abstract class AbstractThreadDirector implements ThreadDirector {
	
	/**
	 * The URLInputCooker that urls to parse are retrieved from.
	 */
	private final URLInputCooker my_cooker;
	
	/**
	 * The SearchHopper that word search results are placed into.
	 */
	private final SearchHopper my_hopper;
	
	/**
	 * The URLStore that newly discovered urls will be fed into.
	 */
	private final URLStore my_store;
	
	/**
	 * The words to search on for html pages.
	 */
	private final String[] my_words;
	
	/**
	 * Creates a new ThreadManager.
	 * @param the_cooker is the URLCooker for this program.
	 * @param the_hopper is the URLSearchHopper for this program.
	 * @param the_store is the URLStore for this program.
	 * @param the_search_words are the words to search on over this program.
	 */
	public AbstractThreadDirector(final URLInputCooker the_cooker,
			final SearchHopper the_hopper, final URLStore the_store,
			final String[] the_search_words) {
		my_cooker = the_cooker;
		my_hopper = the_hopper;
		my_store = the_store;
		// Defensive copy so later external mutation of the caller's array cannot
		// change the words this director searches on (null kept for compatibility).
		my_words = (the_search_words == null) ? null : the_search_words.clone();
	}
	
	/**
	 * Parses every line from a given URLParser, adding the word search results to the
	 * hopper and new urls to the URLStore.  The parser is always closed, even if a
	 * read fails partway through the page.
	 * @param the_parser is the parser-url to search.
	 */
	private void parseURL(final URLParser the_parser) {
		try {
			final WordSearchParser words = new WordSearchParser(my_words);
			final URLSearchParser urls = new URLSearchParser(the_parser.getURL());
			String line = the_parser.readLine();
			while (line != null) {
				words.parseLine(line);
				urls.parseLine(line);
				line = the_parser.readLine();
			}
			// Record the word hits for this page along with how many links it had.
			my_hopper.addResults(words.getData(), the_parser.getURL().toString(), 
					urls.getResults().length);
			for (final URL url : urls.getResults()) {
				my_store.addURL(url); // queue found urls into the store to be searched later
			}
		} catch (IOException the_error) {
			// Best effort: the buffer was already confirmed ready before this call,
			// so a read failure is unexpected; the page is simply skipped.
		} finally {
			// BUG FIX: previously the parser was only closed on the success path,
			// leaking the underlying buffer whenever readLine() threw.
			closeQuietly(the_parser);
		}
	}
	
	/**
	 * Parses a given robots.txt for a particular domain, determining what rules should
	 * be used when adding new urls for that domain.  A new domain is used to replace
	 * the place holder domain in the url store.
	 * @param the_parser is the robots.txt parser to read from.
	 */
	private void parseDomain(final URLParser the_parser) {
		Domain result;
		if (the_parser.getType() == URLParser.NO_DOMAIN) { // host could not be found
			result = new BlockDomain(the_parser.getURL().getHost(), my_cooker);
		} else if (the_parser.getType() == URLParser.OPEN_DOMAIN) { // no robots.txt
			result = new OpenDomain(the_parser.getURL().getHost(), my_cooker);
		} else { // has a robots.txt with limits
			try {
				final DomainSearchParser parser = new DomainSearchParser(the_parser.getURL(), 
						my_cooker);
				String line = the_parser.readLine();
				while (line != null) {
					parser.parseLine(line);
					line = the_parser.readLine();
				}
				result = parser.getResults(); // build the new domain from the robots data
			} catch (IOException the_exception) {
				// Should not happen, buffer was already checked as ready.
				// When in doubt, allow no searching on the domain.
				result = new BlockDomain(the_parser.getURL().getHost(), my_cooker);
			} finally {
				// BUG FIX: previously the parser was only closed on the success path.
				closeQuietly(the_parser);
			}
			// NOTE(review): the NO_DOMAIN / OPEN_DOMAIN branches never read from the
			// parser and historically never closed it; that behavior is preserved.
		}
		my_store.replaceDomain(result);
	}
	
	/**
	 * Closes the given parser, suppressing any exception so that cleanup in a
	 * finally block cannot mask an earlier, more informative failure.
	 * @param the_parser is the parser to close.
	 */
	private static void closeQuietly(final URLParser the_parser) {
		try {
			the_parser.close();
		} catch (final Exception ignored) {
			// Nothing useful can be done if close fails; the read is already over.
		}
	}
	
	/**
	 * Tries to get one url parsed, but there may not be any available.
	 * @return true if a url was parsed, false otherwise.
	 * @see URLInputCooker#getURL()
	 * @see URLParser#ready()
	 */
	protected final boolean parseOne() {
		final URLParser parser = my_cooker.getURL();
		final boolean result = parser != null;
		if (result) { // if there is an available url to parse
			if (parser.getType() == URLParser.NORMAL_URL) {
				parseURL(parser);
			} else {
				parseDomain(parser);
			}
		}
		return result;
	}
	
	/**
	 * Cooks one url, setting up a buffer for it.
	 * @see URLInputCooker#cookOne()
	 */
	protected final void cookOne() {
		my_cooker.cookOne();
	}
}
