/*
 * TCSS 422 Web Spider Project
 * Group Money: Al McKenzie, Michael Pitts, Taylor Zielske
 */
package model;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Observable;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;

/**
 * This class stores urls and assigns them buffers so that eventually they can be
 * collected and parsed.
 * 
 * @author Michael Pitts
 * @version Oct 25, 2011
 */
public class URLInputCooker extends Observable {
	
	/**
	 * Minimum number of completed runs before the cooker will report that it has
	 * run out of urls (prevents a premature "out of net" at start-up, while the
	 * first urls are still being buffered).
	 */
	private static final int MIN = 15;

	/**
	 * Potential urls waiting to be buffered and spidered.
	 */
	private final Queue<String> my_potential_urls;
	
	/**
	 * Potential domain robots.txt urls waiting to be buffered and parsed.
	 */
	private final Queue<String> my_potential_domains;
	
	/**
	 * Domain robots.txt urls that already have a buffer ready to go.
	 */
	private final Queue<URLParser> my_domains;
	
	/**
	 * Regular urls that already have a buffer ready to go.
	 */
	private final Queue<URLParser> my_urls;
	
	/**
	 * Number of cycles completed, i.e. URLParsers handed out by getURL().
	 */
	private int my_runs;
	
	/**
	 * Creates a new URLInputCooker with empty queues.
	 */
	public URLInputCooker() {
		// Concurrent queues: producers may add urls while a consumer cooks/collects.
		my_potential_urls = new ConcurrentLinkedDeque<String>();
		my_potential_domains = new ConcurrentLinkedDeque<String>();
		my_domains = new ConcurrentLinkedDeque<URLParser>();
		my_urls = new ConcurrentLinkedDeque<URLParser>();
		my_runs = 0;
	}

	/**
	 * Adds a new url to eventually be processed and parsed.
	 * @param the_url is the new url to "cook".
	 */
	public void addURL(final String the_url) {
		my_potential_urls.add(the_url);
	}

	/**
	 * Adds a new domain's robots.txt url to be processed.
	 * @param the_domain_url is the new domain's robots.txt url.
	 */
	public void addDomain(final String the_domain_url) {
		my_potential_domains.add(the_domain_url);
	}
	
	/**
	 * Cooks one potential url (sets up the buffer and sends the initial request
	 * to its server).  Gives priority to domain robots.txt file urls.
	 */
	public void cookOne() {
		if (!my_potential_domains.isEmpty()) { // at least one domain to check
			try { // check domain's robots.txt as priority
				final URL domain = new URL(my_potential_domains.remove());
				try {
					my_domains.add(new URLParser(domain, URLParser.DOMAIN_URL));
				} catch (final FileNotFoundException no_file) {
					// no robots.txt file: treat the whole domain as open
					my_domains.add(new URLParser(domain, URLParser.OPEN_DOMAIN, 
							URLParser.getFakeReader()));
				} catch (final IOException the_exception) {
					// host not working: mark the domain dead and move on
					my_domains.add(new URLParser(domain, URLParser.NO_DOMAIN, 
							URLParser.getFakeReader()));
				} catch (final IllegalArgumentException the_exception) {
					// bad domain url: treat it the same as an unreachable host
					my_domains.add(new URLParser(domain, URLParser.NO_DOMAIN,
							URLParser.getFakeReader()));
				}
			} catch (final MalformedURLException the_exception) {
				// should not happen; the malformed url is simply dropped
			} catch (final NoSuchElementException the_exception) {
				// isEmpty() gave a false go-ahead (another thread drained the
				// queue between the check and remove()); fall back to buffering
				// a normal url, rather than a robots.txt.
				bufferNormal();
			}
		} else if (!my_potential_urls.isEmpty()) { // try to get normal url to buffer
			bufferNormal();
		}
	}
	
	/**
	 * Called when wanting to get a buffer for a normal url, not a domain's
	 * robots.txt file.  Tries to take one url from the potential urls and
	 * set it up with a buffer.
	 */
	private void bufferNormal() {
		try {
			my_urls.add(new URLParser(new URL(my_potential_urls.remove()), 
				URLParser.NORMAL_URL));
		} catch (final IOException the_exception) {
			// not a problem, the url was unreachable or mal-formed; drop it
		} catch (final NoSuchElementException the_exception) {
			// no urls ready to buffer, do nothing.
		}
	}

	/**
	 * Gets one url that is ready to be parsed, giving preference to any domains
	 * that do not yet have a robots.txt file done yet.
	 * @return a URLParser that is ready to be read, or null if none are ready.
	 * @see URLParser#ready()
	 */
	public URLParser getURL() {
		URLParser parser = null;
		if (!my_domains.isEmpty()) { // priority to domain robots.txt files
			parser = getURLParser(my_domains);
		}
		if (parser == null && !my_urls.isEmpty()) { // no robots.txt ready, try normal url
			parser = getURLParser(my_urls);
		}
		if (parser == null && my_domains.isEmpty() && my_urls.isEmpty() && 
				my_potential_domains.isEmpty() && my_potential_urls.isEmpty() &&
				my_runs > MIN) {
			outOfNet(); // run out of network, no more urls left to search and parse.
		}
		if (parser != null) {
			my_runs++;
		}
		return parser;
	}

	/**
	 * Is called when the parser has no more potential urls or domain robots.txt
	 * files that can be buffered or parsed.  Notifies observers so the program
	 * can end before the maximum number of sites has been searched.
	 */
	private void outOfNet() {
		setChanged();
		notifyObservers();
	}

	/**
	 * Given a queue of URLParsers, returns the first parser whose buffer is
	 * ready, removing it from the queue.  Parsers whose buffers throw an
	 * IOException are discarded.
	 * @param the_queue is the queue to search.
	 * @return a URLParser that is ready, or null if none can be found.
	 * @see URLParser#ready()
	 */
	private synchronized URLParser getURLParser(final Queue<URLParser> the_queue) {
		final Iterator<URLParser> iterator = the_queue.iterator();
		while (iterator.hasNext()) {
			final URLParser url_parser = iterator.next();
			try {
				if (url_parser.ready()) { // ready: claim it and stop looking.
					// BUG FIX: the old loop kept scanning after a hit, removing
					// every ready parser from the queue but returning only the
					// LAST one found, silently losing the rest.  Return the
					// first ready parser, as documented.
					iterator.remove();
					return url_parser;
				}
			} catch (final IOException the_error) {
				iterator.remove(); // bad buffer, remove it from the queue
			}
		}
		return null;
	}
	
	/**
	 * Returns how many urls have not been buffered yet.
	 * @return an integer representing how many urls have not yet been buffered.
	 */
	public int howManyPotentialURLs() {
		return my_potential_domains.size() + my_potential_urls.size();
	}
	
	/**
	 * Returns how many urls have been buffered but not yet parsed.
	 * @return an integer representing how many urls have not yet been parsed.
	 */
	public int howManyBufferedURLs() {
		return my_domains.size() + my_urls.size();
	}
}
