import java.io.*;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Scanner;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

/**
 * 
 * Description: This class implements the lookup half of the search engine. The
 * user types one or more keywords, and the program looks them up in the index
 * built by the crawler to find the web pages that contain all of the keywords.
 * The list of matching pages is ordered by the combined frequency of the
 * keywords on each page.
 * 
 * @author Peter Tran (pxtran)
 * @author Andy Chhuseng Ieng (cieng) Copyright (C) 2009 Andy ChhuSeng Ieng
 *         <cieng@ucsd.edu> and Peter Tran <pxtran@ucsd.edu>
 */
public class Lookup {

	private static FileInputStream file; // stream over the serialized index file
	private static Scanner input; // reads keyword queries from the user
	private static String keyWord; // keyword currently being processed
	// the inverted index: keyword -> (URL -> occurrence count)
	private static HashMap<String, HashMap<String, Integer>> mainHash;
	private static String CurrentURL; // URL currently being scored
	// a previous query matched nothing; report "0 documents" before re-prompting
	private static boolean pageNotFound = false;
	// the last keyword of the query was found in the index
	private static boolean pageFound = false;
	// a stemmed keyword had no URL map in the index (query has 0 results)
	private static boolean pageNull = false;

	/**
	 * function: main
	 * 
	 * @param args
	 *            - The command line arguments; args[0] is the serialized index
	 *            file produced by the indexer.
	 * @throws IOException
	 *             if reading the index file fails after it was opened
	 * @throws ClassNotFoundException
	 *             if the serialized object's class cannot be resolved
	 * 
	 *             Description: This is the main method of the class. It reads
	 *             the index, then repeatedly prompts the user for a keyword
	 *             query, stems each keyword, finds the pages containing all
	 *             keywords, sorts them by combined keyword frequency, and
	 *             prints them. A blank line exits the program.
	 */
	@SuppressWarnings("unchecked")
	public static void main(String args[]) throws IOException,
			ClassNotFoundException {

		// URL -> occurrence count for the keyword under consideration
		HashMap<String, Integer> URLhash = null;
		Set<String> URLset; // key view used to iterate over candidate URLs
		String[] URLarray = null; // URLs of the final result list
		int[] Wcounts = null; // word count per URL, parallel to URLarray
		// the raw (unstemmed) keywords split from the query line
		LinkedList<String> multiwords = new LinkedList<String>();
		// the stemmed keywords actually looked up in the index
		LinkedList<String> cleanwords = new LinkedList<String>();
		// keyword -> its URL/count map, for every keyword present in the index
		HashMap<String, HashMap<String, Integer>> AllURLs = new HashMap<String, HashMap<String, Integer>>();
		// true when the query is unusable and the user must be re-prompted
		boolean flag = false;
		String first; // the raw query line (blank line ends the program)

		try {
			System.out.print("Reading index file out... ");
			// deserialize the index built by the indexer
			file = new FileInputStream(args[0]);
			ObjectInputStream FInput = new ObjectInputStream(file);
			mainHash = (HashMap<String, HashMap<String, Integer>>) FInput
					.readObject();

			// cannot find index file
		} catch (FileNotFoundException e) {
			// BUGFIX: report the file actually opened (args[0], not args[1])
			System.err.println("Couldn't open " + args[0]
					+ " for reading; exiting.");
			System.exit(1);
		}

		System.out.println("Contains " + mainHash.size() + " keywords.");

		input = new Scanner(System.in); // takes input stream

		do {
			// reset per-query state
			pageFound = false;
			// BUGFIX: pageNull was never cleared, so one failed query made
			// every later multi-keyword query skip its results silently
			pageNull = false;
			multiwords.clear();
			cleanwords.clear();
			flag = false;
			// report a previous failed query before re-prompting
			if (pageNotFound == true) {
				System.out.println("RESULTS: " + 0
						+ " documents match all query keywords. ");
			}
			System.out.println("Enter keyword query "
					+ "(blank line to exit the program):");
			keyWord = input.nextLine(); // takes in keyboard input
			first = keyWord;

			int subTrack = 0; // start index of the current token in the query
			int numDocs = 0; // number of documents in the final result

			// checks if the keyword not blank
			if (keyWord.length() > 0) {

				// split the query into individual raw keywords on spaces
				for (int n = 0; n < keyWord.length(); n++) {
					if (keyWord.charAt(n) == ' ') {
						multiwords.add(keyWord.substring(subTrack, n));
						subTrack = n + 1;
					} else if (n == keyWord.length() - 1)
						multiwords.add(keyWord.substring(subTrack, n + 1));
				}
				if (multiwords.size() == 0)
					multiwords.add(keyWord);

				int countInvalid = 0; // how many keywords stemmed to nothing

				// stem each keyword and report how many pages contain it
				for (int m = 0; m < multiwords.size(); m++) {
					keyWord = Keyword.makeKeyword(multiwords.get(m)
							.toLowerCase());
					System.out.print("\"" + multiwords.get(m) + "\""
							+ " stems to --> ");
					System.out.print("\"" + keyWord + "\": ");

					// a keyword that stems to the empty string is ignored;
					// if ALL keywords are invalid the user is re-prompted
					if (keyWord.equals("")) {
						System.out.println("ignoring.");
						if (++countInvalid == multiwords.size()) {
							System.out
									.println("No valid keywords!  Try again.");
							flag = true;
							countInvalid = 0;
						}
					} else {
						cleanwords.add(keyWord);
						flag = false;
					}
					// prints # of pages found for keyword
					if (mainHash.containsKey(keyWord)) {
						URLhash = mainHash.get(keyWord);
						System.out.println("found in " + URLhash.size()
								+ " web pages.");
						if (m == (multiwords.size() - 1))
							pageFound = true;
						pageNotFound = true;
					}
					// for 0 results
					else if (keyWord.length() > 0) {
						System.out.println("found in 0 web pages.");
						flag = true;
					}
				}

				// goes back to prompt user
				if (flag) {
					pageNotFound = true;
					continue;
				}

				// collect the URL maps of every keyword present in the index
				for (int g = 0; g < cleanwords.size(); g++) {
					if (mainHash.containsKey(cleanwords.get(g))) {
						URLhash = mainHash.get(cleanwords.get(g));
						AllURLs.put(cleanwords.get(g), URLhash);
					}
				}

				// final result: URL -> combined count across all keywords
				HashMap<String, Integer> URLhash2 = new HashMap<String, Integer>();

				// candidate URLs come from the first keyword's map
				URLhash = AllURLs.get(cleanwords.get(0));
				if (URLhash == null) {
					continue;
				}
				URLset = URLhash.keySet(); // converts to set to iterate

				int Allin = 0; // used for checking if all keywords in page

				// keep only the URLs that contain every keyword
				Allin = findAllValidURLs(URLset, cleanwords, AllURLs, URLhash2,
						Allin);
				if (pageNull == true) {
					continue;
				}
				// single keyword: the first keyword's map already is the answer
				if (URLhash2.size() == 0)
					URLhash2 = URLhash;

				// makes set to iterate
				URLset = URLhash2.keySet();

				// copy URLs and counts into parallel arrays for sorting;
				// snapshot toArray() once instead of once per element
				URLarray = new String[URLhash2.size()];
				Wcounts = new int[URLhash2.size()];
				Object[] urls = URLset.toArray();
				for (int i = 0; i < URLhash2.size(); i++) {
					URLarray[i] = (String) urls[i];
					CurrentURL = URLarray[i];
					Wcounts[i] = URLhash2.get(CurrentURL);
				}

				// order URLs by descending combined frequency
				sortURL(URLarray, Wcounts);
				numDocs = URLhash2.size(); // sets the number of documents
			}

			// prints the results with list of pages found
			if (cleanwords.size() > 0 || pageFound == true) {
				System.out.println("RESULTS: " + numDocs
						+ " documents match all query keywords. ");
				pageNotFound = false;
				System.out.println("Here they are, sorted:");

				for (int count = 0; count < URLarray.length; count++) {

					System.out.print(URLarray[count] + ": ");
					System.out.println("score " + Wcounts[count]);
				}

			}

			// checks if keyword is blank
		} while (first.length() > 0);

		System.out.println("Okay, bye.");
	}

	/**
	 * Function: findAllValidURLs
	 * 
	 * @param URLset
	 *            - The candidate URLs (keys of the first keyword's map)
	 * @param cleanwords
	 *            - The stemmed keywords
	 * @param AllURLs
	 *            - Map from each keyword to its URL/count map
	 * @param URLhash2
	 *            - Output: the URLs containing ALL keywords, with their
	 *            combined counts (mutated in place)
	 * @param Allin
	 *            - Running count of other keywords matched (callers pass 0)
	 * @return the residual match counter (always 0 on normal completion)
	 * 
	 *         Description: For each URL of the first keyword, checks whether
	 *         every other keyword also occurs on that page; if so, stores the
	 *         URL with the summed occurrence count in URLhash2. If any keyword
	 *         has no URL map at all, sets pageNull/pageNotFound so the caller
	 *         can abort the query.
	 */
	private static int findAllValidURLs(Set<String> URLset,
			LinkedList<String> cleanwords,
			HashMap<String, HashMap<String, Integer>> AllURLs,
			HashMap<String, Integer> URLhash2, int Allin) {
		int totCount; // combined occurrence count for the current URL
		String checkURL; // URL to compare to
		for (Object url : URLset.toArray()) {
			checkURL = (String) url;
			totCount = AllURLs.get(cleanwords.get(0)).get(checkURL);

			for (int l = 1; l < cleanwords.size(); l++) {
				HashMap<String, Integer> postings = AllURLs.get(cleanwords
						.get(l));
				if (postings == null) {
					// keyword missing from the index: query has no results
					pageNotFound = true;
					pageNull = true;
					break;
				}
				// direct hash lookup replaces the original linear scan of
				// keySet().toArray() — same result, O(1) per keyword
				if (postings.containsKey(checkURL)) {
					Allin++;
					// adds the total word count
					totCount += postings.get(checkURL);
				}
			}
			// URL qualifies only if every one of the OTHER keywords matched
			if (Allin == cleanwords.size() - 1) {
				URLhash2.put(checkURL, totCount);
			}

			Allin = 0;
		}
		return Allin;
	}

	/**
	 * function: sortURL
	 * 
	 * @param URLarray
	 *            - The array of URLs
	 * @param Wcounts
	 *            - The array of word counts, parallel to URLarray
	 * 
	 *            Description: Sorts both parallel arrays in place by word
	 *            count, greatest to least, using bubble sort. Ties keep their
	 *            relative order (the sort is stable because only strictly
	 *            smaller counts are swapped).
	 */
	private static void sortURL(String[] URLarray, int[] Wcounts) {
		// bubble sort: after pass j the last j entries are in final position
		for (int j = 1; j < URLarray.length; j++) {
			for (int k = 0; k < (URLarray.length - j); k++)
				if (Wcounts[k] < Wcounts[k + 1]) {
					String temp = URLarray[k];
					URLarray[k] = URLarray[k + 1];
					URLarray[k + 1] = temp;

					int temp2 = Wcounts[k];
					Wcounts[k] = Wcounts[k + 1];
					Wcounts[k + 1] = temp2;
				}
		}
	}

}
