import java.net.*;
import java.util.Hashtable;
import java.util.LinkedList;
import java.util.Set;
import java.util.Vector;
import java.util.Stack;
import java.io.*;
import javax.swing.text.html.parser.ParserDelegator;

/**
 * Parses web pages specified by the user on the command line and indexes the
 * non-trivial words in a data structure, then writes that data structure to a
 * file to be read by the Lookup.java file. Functions as one component in a
 * web-search program.
 * 
 * @author alrik firl (afirl)
 * @author Pin-Wei Cheng (picheng)
 * 
 */
public class Index {

	/** Maps each keyword to a table of URL -> occurrence count on that page. */
	private Hashtable<String, Hashtable<String, Integer>> hashtable;
	/** Starting URL supplied on the command line. */
	private String firstPage;
	/** Traversal mode: either "depth" or "breadth". */
	private String searchMethod;
	/** Maximum number of pages to index. */
	private int maxPages;
	/** Name of the file the serialized index is written to. */
	private String indexFile;

	/** Number of pages successfully parsed so far (DFS only). */
	private int pagesCount;
	/** URLs that have already been parsed and indexed. */
	private Stack<String> traversedURLs;
	/** URLs that could not be opened (e.g. 404). */
	private Stack<String> unworkingURLs;
	/** Set once the next page to be parsed is the last one allowed by maxPages. */
	private boolean lastURL;

	/**
	 * Validates the command line arguments and initializes the crawler state.
	 * Exits with a usage message if maxpages is not numeric or the starting
	 * URL cannot be opened.
	 *
	 * @param args command line arguments: firstpage depth|breadth maxpages indexfile
	 */
	public Index(String[] args) {
		hashtable = new Hashtable<String, Hashtable<String, Integer>>();
		firstPage = args[0];
		searchMethod = args[1];

		try {
			/* validate that maxpages parses and the start URL is well-formed/reachable */
			Integer.parseInt(args[2]);
			URL page = new URL(args[0]);
			URLConnection connect = page.openConnection();
			/* sets up the connection to the URL and parses it */
			connect.setDoInput(true);
			System.out.println("Connected to "+args[0]);
		} catch (Exception e) {
			System.out.println("Usage: P4SolutionIndex firstpage depth|breadth maxpages indexfile");
			System.exit(0);
		}
		maxPages = Integer.parseInt(args[2]);
		indexFile = args[3];
		pagesCount = 0;
		traversedURLs = new Stack<String>();
		unworkingURLs = new Stack<String>();
		lastURL = false;
	}

	/**
	 * Drives the Index program, reads in the input and calls
	 * Index's methods to ultimately write the indexed data to
	 * the file specified in the command line arguments.
	 *
	 * @param args command line input from user
	 * @throws Exception if the traversal or parsing fails unexpectedly
	 */
	public static void main(String[] args) throws Exception {
		/* checks input command line arguments */
		if (args.length < 4) {
			System.err.println("Not enough parameters.");
			System.exit(-1);
		}

		Index caller = new Index(args);
		caller.readURL();
		caller.writeHash(caller.hashtable, caller.indexFile);
	}

	/**
	 * Writes the hashtable (serializable) object which relates strings to URLs
	 * and the number of those Strings occurrences to file.
	 *
	 * @param table
	 *            the hashtable to be written to filenm
	 * @param filenm
	 *            the name of the output file
	 */
	public void writeHash(Hashtable<String, Hashtable<String, Integer>> table,
			String filenm) {
		/* write the Hashtable to the file */
		File file = new File(filenm);
		try {
			System.out.print("Writing keyword index in file " + indexFile
					+ "... ");
			// use buffering
			OutputStream fileout = new FileOutputStream(file);
			OutputStream bufferout = new BufferedOutputStream(fileout);
			ObjectOutput output = new ObjectOutputStream(bufferout);
			try {
				output.writeObject(table);
			} finally {
				/*
				 * FIX: the original closed output twice with bufferout closed in
				 * between; closing the outermost stream closes the wrapped
				 * streams as well, so one close is both sufficient and correct.
				 */
				output.close();
			}

			System.out.println("Done");
		} catch (IOException ex) {
			System.err.println("Failed to write to file");
		}
	}

	/**
	 * Traverses the internet from the start point specified in the url
	 * parameter; traverses in accordance with the DFS algorithm (preorder
	 * traversal).
	 *
	 * @param url
	 *            the URL to start indexing at
	 * @return true if the traversal should continue; false once the page
	 *         limit has been reached
	 * @throws IOException if reading a page fails while parsing
	 */
	public boolean depthFirstSearchURL(String url) throws IOException {
		if (pagesCount >= maxPages) {
			return false;
		}
		if (pagesCount == maxPages - 1) {
			lastURL = true;
		}
		BufferedReader bufferedReader;
		/* open the web page, buffer the input from it */
		try {
			URL urlPage = new URL(url);
			InputStream inputStream = urlPage.openStream();
			bufferedReader = new BufferedReader(new InputStreamReader(
					inputStream));
		} catch (FileNotFoundException e) {
			/* dead link: remember it and keep traversing the rest */
			unworkingURLs.push(url);
			return true;
		}

		pagesCount++;

		CallbackParser callbackParser = new CallbackParser(url, hashtable);

		if (lastURL == true) {
			callbackParser.setLastURL(true);
		}
		ParserDelegator parserDelegator = new ParserDelegator();
		/* parses the actual web page */
		System.out.print(url + " ");
		try {
			parserDelegator.parse(bufferedReader, callbackParser, true);
		} finally {
			/* FIX: the reader was previously never closed (resource leak) */
			bufferedReader.close();
		}

		traversedURLs.push(url);
		hashtable = callbackParser.getHashtable();

		int distinctKeywordsCount = callbackParser.getDistinctKeywordsCount();

		System.out.println("(" + distinctKeywordsCount + ")");

		/* gets the adjacent URLs to the current web page */
		Stack<String> adjacentURLs = callbackParser.getAdjacentURLs();
		String adjacentURL;
		int adjacentURLsCount = adjacentURLs.size();
		/* Recursively traverse the web pages and their adjacent web pages */
		for (int i = 0; i < adjacentURLsCount; i++) {

			adjacentURL = adjacentURLs.pop();

			/* skip pages that have already been indexed */
			boolean traversedResult = traversedURLs.contains(adjacentURL);
			if (traversedResult == false) {
				boolean searchResult = depthFirstSearchURL(adjacentURL);
				if (searchResult == false) {
					/* page limit reached somewhere below; stop the whole DFS */
					return false;
				}
			}
		}

		return true;
	}

	/**
	 * Executes either a breadth first search or a depth first search depending
	 * on the command line arguments based from the starting URL until the max
	 * number of pages has been reached.
	 *
	 * @throws Exception if the underlying traversal fails
	 */
	public void readURL() throws Exception {
		System.out.println("Doing a "
				+ (searchMethod.equals("depth") ? "depth" : "breadth")
				+ "-first search of the Web, starting from");
		System.out.println(firstPage);
		System.out.println("-------------------------------------------------");
		System.out.println("   URL visited          (keywords on page itself)");
		System.out.println("-------------------------------------------------");
		/* Either BFS or DFS */
		if (searchMethod.equals("breadth")) {
			BFS();
		} else if (searchMethod.equals("depth")) {
			depthFirstSearchURL(firstPage);
		} else {
			System.err.println("Invalid search method.");
			return;
		}
		System.out.println();
		totalDistinctKeywordsCalculator();
	}

	/**
	 * Prints out the total number of distinct words and their associated URLs,
	 * taking the URL's HREF keywords into account.
	 */
	private void totalDistinctKeywordsCalculator() {

		/* per-URL count of distinct keywords, accumulated across all keywords */
		Hashtable<String, Integer> totalDistinctKeywords = new Hashtable<String, Integer>();
		Hashtable<String, Integer> hashtableValue;

		Set<String> set = hashtable.keySet();
		Set<String> valueSet;
		/* loops through each lookup string stored in the hashtable */
		for (String keyword : set) {
			hashtableValue = hashtable.get(keyword);
			valueSet = hashtableValue.keySet();

			/* url -> stack of keywords seen on that url, for counting below */
			Hashtable<String, Stack<String>> tempHashtable = new Hashtable<String, Stack<String>>();
			/* url -> count, restricted to URLs that were actually traversed */
			Hashtable<String, Integer> newHashtableValue = new Hashtable<String, Integer>();
			/* loops through each URL corresponding to each of the lookup strings in the hashtable,
			 * populates the data structures for printing later */
			for (String url : valueSet) {

				if (traversedURLs.contains(url)) {

					if (!unworkingURLs.contains(url)) {
						if (!tempHashtable.containsKey(url)) {
							Stack<String> tempStack = new Stack<String>();
							tempStack.push(keyword);
							tempHashtable.put(url, tempStack);
						} else {
							Stack<String> pStack = tempHashtable.get(url);
							if (!pStack.contains(keyword)) {
								pStack.push(keyword);
							}
							tempHashtable.put(url, pStack);
						}
					} else {
						/* URL was unreachable: drop the keyword when it has no
						 * remaining URLs, otherwise keep what is left */
						if (hashtableValue.size() == 0) {
							hashtable.remove(keyword);
						} else {
							hashtable.put(keyword, hashtableValue);
						}
					}
					newHashtableValue.put(url, hashtableValue.get(url));
				}
			}

			if (newHashtableValue.size() > 0) {
				hashtable.put(keyword, newHashtableValue);
			}

			Set<String> urlSet = tempHashtable.keySet();
			/* populates the totalDistinctKeywords data structure for
			 * the subsequent printing to the command line */
			for (String url : urlSet) {
				int tempInt = tempHashtable.get(url).size();
				if (totalDistinctKeywords.containsKey(url)) {
					tempInt += totalDistinctKeywords.get(url);
				}
				totalDistinctKeywords.put(url, tempInt);
			}
		}
		/* Prints the Indexing results to the command line */
		Set<String> totalSet = totalDistinctKeywords.keySet();
		System.out.println("Searched " + totalSet.size() + " web pages and found "
				+ hashtable.size() + " distinct keywords.");

		System.out.println("In the keyword index:");
		System.out.println("-------------------------------------------------");
		System.out.println("   URL visited          (total keywords)");
		System.out.println("-------------------------------------------------");

		for (String keyword : totalSet) {
			System.out.println(keyword + " ("
					+ totalDistinctKeywords.get(keyword) + ")");

		}
	}

	/**
	 * Traverses the internet from the start point specified in command line
	 * arguments; traverses in accordance with the BFS algorithm (level-order
	 * traversal).
	 *
	 * @throws Exception if parsing a page fails unexpectedly
	 */
	public void BFS() throws Exception {
		Vector<String> visitedList = new Vector<String>();

		/* FIFO work queue of pages still to be visited */
		LinkedList<Vertex> queue = new LinkedList<Vertex>();

		/* The BFS starts here */
		Vertex start = new Vertex(firstPage);
		int counter = 0;
		queue.add(start);
		while (queue.size() > 0) {
			/* only go as far as maxPage's # of URLs */
			if (counter == maxPages) {
				break;
			}
			/* removes the highest priority element from the list */
			Vertex working = queue.removeFirst();
			/* parses the URL and adds keywords to the hashtable */

			try {
				/* setup for BFS, initialize data */
				URL page = new URL(working.URLVertex);
				URLConnection connect = page.openConnection();
				/* sets up the connection to the URL and parses it */
				connect.setDoInput(true);
				InputStream StreamURL = connect.getInputStream();
				BufferedReader URLinadj = new BufferedReader(
						new InputStreamReader(StreamURL));
				ParserDelegator parseradj = new ParserDelegator();
				CallbackParser parseURLadj = new CallbackParser(
						working.URLVertex, hashtable);
				try {
					parseradj.parse(URLinadj, parseURLadj, true);
				} finally {
					/* FIX: the reader was previously never closed (resource leak) */
					URLinadj.close();
				}
				System.out.println(working.URLVertex+" ("+parseURLadj.getDistinctKeywordsCount()+")");

				/* track which URLs have been parsed and indexed */
				traversedURLs.add(working.URLVertex);
				visitedList.add(working.URLVertex);

				Stack<String> adjtmp = parseURLadj.getAdjacentURLs();
				while (adjtmp.size() > 0) {
					working.adjacentURL.add(adjtmp.pop());
				}
				/* adds the adjacency list for the current URL to the queue */
				for (int i = working.adjacentURL.size()-1; i >= 0 ; i--) {
					String e = working.adjacentURL.elementAt(i);
					/* check if the URL has been visited before */
					if (!visitedList.contains(e)) {
						queue.addLast(new Vertex(e));
					}
				}
				counter++;
			} catch (FileNotFoundException e) {
				/* dead link: remember it; it does not count toward maxPages */
				unworkingURLs.push(working.URLVertex);
				continue;
			} catch (IOException e) {
				System.out.println("Could not open page!");
			}
		}
	}
}
