/*
 * TCSS 422 Web Spider Project
 * Group Money: Al McKenzie, Michael Pitts, Taylor Zielske
 */
package model;

import java.net.URL;
import java.util.ArrayList;
import java.util.List;

import model.domain.BlackListDomain;
import model.domain.BlockDomain;
import model.domain.Domain;
import model.domain.OpenDomain;
import model.domain.WhiteListDomain;

/**
 * This class parses a given domain's robots.txt file, returning a new domain
 * representing the rules given in the robots.txt file.
 * 
 * @author Michael Pitts
 * @version Oct 25, 2011
 */
public class DomainSearchParser {
	
	/**
	 * The text leading an allow statement.
	 */
	private static final String ALLOW = "Allow: ";
	
	/**
	 * The text leading a disallow statement.
	 */
	private static final String DISALLOW = "Disallow: ";
	
	/**
	 * The text leading a user agent statement.
	 */
	private static final String USERAGENT = "User-agent: ";
	
	/**
	 * The list of directories found in the robots.txt file.
	 */
	private final List<String> my_directories;
	
	/**
	 * The list of urls found in the robots.txt file.
	 */
	private final List<String> my_urls;
	
	/**
	 * The string representation of the domain.
	 */
	private final String my_domain;
	
	/**
	 * The URLInputCooker for this program.
	 */
	private final URLInputCooker my_cooker;
	
	/**
	 * The type of domain to return; null until a rule line is parsed.
	 */
	private Class<? extends Domain> my_type;
	
	/**
	 * This tracks if the current parsing is for generic user agents.  If it is, will
	 * equal true, else will be false.
	 */
	private boolean my_user_go;
	
	/**
	 * Creates a new DomainSearchParser, which is ready to parse a robots.txt file.
	 * @param the_url is the url of the robots.txt file.
	 * @param the_cooker is this program's URLInputCooker.
	 */
	public DomainSearchParser(final URL the_url, final URLInputCooker the_cooker) {
		my_directories = new ArrayList<String>();
		my_urls = new ArrayList<String>();
		my_type = null; // don't know what type of domain it is yet
		my_domain = the_url.getHost();
		my_cooker = the_cooker;
		my_user_go = false; // by default not a generic user agent
	}

	/**
	 * Parses a single line of input from the robots.txt file.  Every
	 * User-agent line starts a new record, so the generic-agent flag is
	 * re-evaluated there; rules are only honored while the flag is set.
	 * @param the_line is the line to parse for data.
	 */
	public void parseLine(final String the_line) {
		if (the_line.startsWith(USERAGENT)) {
			// A new record begins; only "*" (any agent) records apply to us.
			// Re-evaluating here also turns the flag OFF when an
			// agent-specific record follows a generic one.
			final String agent = the_line.substring(USERAGENT.length()).trim();
			my_user_go = "*".equals(agent);
		} else if (my_user_go) { // inside a general user agent record
			if (!BlockDomain.class.equals(my_type) && 
					!BlackListDomain.class.equals(my_type) && 
					the_line.startsWith(ALLOW)) {
				runAllow(the_line); // Not going to follow allowed sub-domains
			} else if (the_line.startsWith(DISALLOW)) {
				runDisallow(the_line);
			}
		}
	}
	
	/**
	 * Goes over a line beginning with Disallow: .  An empty path (bare
	 * "Disallow: ") carries no restriction and is ignored.
	 * @param the_line is the line to parse.
	 */
	private void runDisallow(final String the_line) {
		// startsWith(DISALLOW) already held, so substring is safe.
		final String not_allowed = the_line.substring(DISALLOW.length()).trim();
		if (!not_allowed.isEmpty()) {
			if ("/".equals(not_allowed)) {
				my_type = BlockDomain.class; // not allowing all directories
			} else {
				my_type = BlackListDomain.class; // blocking some directories
				if (not_allowed.endsWith(".html") || not_allowed.endsWith(".htm")) {
					my_urls.add("http://" + my_domain + not_allowed); // blocking specific file
				} else if (not_allowed.endsWith("/")) {
					my_directories.add("http://" + my_domain + not_allowed); // blocking directory
				}
			}
		}
	}

	/**
	 * Goes over a line beginning with Allow: .  An empty path (bare
	 * "Allow: ") carries no rule and is ignored, matching runDisallow.
	 * @param the_line is the line to parse.
	 */
	private void runAllow(final String the_line) {
		// startsWith(ALLOW) already held, so substring is safe.
		final String allowed = the_line.substring(ALLOW.length()).trim();
		if (allowed.isEmpty()) {
			return; // nothing after "Allow: " - no information
		}
		my_type = WhiteListDomain.class; // assume will block most urls
		if (allowed.endsWith(".html") || allowed.endsWith(".htm")) {
			my_urls.add("http://" + my_domain + allowed); // allowing specific file
		} else if (allowed.endsWith("/")) {
			if ("/".equals(allowed)) { // allowing all directories
				my_type = OpenDomain.class;
			} else { // allowing one particular directory
				my_directories.add("http://" + my_domain + allowed);
			}
		}
	}

	/**
	 * Gets a new domain loaded with the rules parsed from the feed lines.
	 * If no rule for general user agents was seen, everything is assumed
	 * to be open.
	 * @return a new Domain of the correct type based on parsed data.
	 */
	public Domain getResults() {
		Domain results; // stores the final domain
		if (my_type != null) {
			final String[] dirs = my_directories.toArray(new String[my_directories.size()]);
			final String[] urls = my_urls.toArray(new String[my_urls.size()]);
			if (BlackListDomain.class.equals(my_type)) {
				results = new BlackListDomain(my_domain, my_cooker, dirs, urls);
			} else if (WhiteListDomain.class.equals(my_type)) {
				results = new WhiteListDomain(my_domain, my_cooker, dirs, urls);
			} else if (OpenDomain.class.equals(my_type)) {
				results = new OpenDomain(my_domain, my_cooker);
			} else {
				results = new BlockDomain(my_domain, my_cooker);
			}
		} else { // robots.txt file made no reference to general user agents, 
			// assuming all files and directories are okay.
			results = new OpenDomain(my_domain, my_cooker);
		}
		return results;
	}
}
