import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.ObjectOutputStream;
import java.io.Reader;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.LinkedList;
import java.util.Set;

import javax.swing.text.html.HTMLEditorKit.ParserCallback;
import javax.swing.text.html.parser.ParserDelegator;

/**
 * Index crawls the Web starting from a given URL (breadth-first or
 * depth-first), collects the distinct keywords found on each visited page,
 * and writes the resulting keyword hashtable to a user-specified file.
 * 
 * @author vihuynh, k7ho
 * 
 */
public class Index {
	// user-facing message templates
	final static String ERR_URL = "%s is not a valid absolute URL.\n";
	final static String MSG_USAGE = "Usage: %s firstpage depth|breadth maxpages indexfile\n";
	final static String MSG_START = "Doing a %s-first search of the Web, starting from \n%s\n";
	final static String MSG_HEADER = "Doing a %s-first search of the Web, starting from\n%s\n";

	// keyword -> (URL -> hit count); the index that is serialized at the end
	private Hashtable<String, Hashtable<String, Integer>> hashTbl;
	// target URL -> anchor-text words that linked to it; merged into hashTbl
	// after the crawl, but only for pages that were actually visited
	private Hashtable<String, LinkedList<String>> aTagAnchor;
	// frontier of hrefs still to visit (FIFO for breadth, LIFO for depth)
	static LinkedList<String> urlLinks;
	// pages already fetched and parsed, in visit order
	static LinkedList<String> visitedLinks;
	// URL -> number of keywords found on that page
	static HashMap<String, Integer> urlList;

	private String[] args;
	private int maxPage = 0; // maximum number of pages to visit
	private boolean breadth = true; // true = breadth-first, false = depth-first

	/**
	 * Builds an Index from the command-line arguments and initializes all
	 * crawl data structures. The search becomes depth-first when args[1]
	 * equals "depth"; any other value keeps the breadth-first default.
	 * 
	 * @param args command line: firstpage depth|breadth maxpages indexfile
	 * @throws NumberFormatException if args[2] is not an integer
	 */
	public Index(String[] args) {
		this.args = args;
		hashTbl = new Hashtable<String, Hashtable<String, Integer>>();
		// Integer.parseInt replaces the deprecated new Integer(String)
		maxPage = Integer.parseInt(this.args[2]);
		aTagAnchor = new Hashtable<String, LinkedList<String>>();
		urlLinks = new LinkedList<String>();
		visitedLinks = new LinkedList<String>();
		urlList = new HashMap<String, Integer>();

		if (args[1].equals("depth")) {
			breadth = false;
		}
	}

	/**
	 * Entry point. Validates the arguments, then repeatedly fetches pages
	 * starting from args[0] — taking the next URL from the front of the
	 * frontier for breadth-first search or from the back for depth-first —
	 * until the frontier is empty or maxpages pages have been visited.
	 * Anchor words collected during the crawl are then merged into the
	 * keyword index for every visited page, per-page summaries are printed,
	 * and the index is serialized to args[3].
	 * 
	 * @param args command line: firstpage depth|breadth maxpages indexfile
	 */
	public static void main(String[] args) {

		// require all four arguments and a non-negative integer maxpages
		try {
			if (args.length < 4 || Integer.parseInt(args[2]) < 0) {
				System.out.printf(MSG_USAGE, Index.class.getName());
				return;
			}
			else if (Integer.parseInt(args[2]) > 40) {
				System.out.println("Sorry, this test version will not spider "
						+ "more than 40 pages.");
				return;
			}
		} catch (NumberFormatException num) {
			System.out.printf(MSG_USAGE, Index.class.getName());
			return;
		}

		// the start page must be a valid absolute URL
		try {
			@SuppressWarnings("unused")
			URL testURL = new URL(args[0]);
		} catch (MalformedURLException num) {
			System.out.printf(ERR_URL, args[0]);
			System.out.printf(MSG_USAGE, Index.class.getName());
			return;
		}

		try {
			Index index = new Index(args);

			String readingURL = args[0]; // crawl starts here
			System.out.printf(MSG_HEADER, index.getBreadth() ? "breadth"
					: "depth", readingURL);

			System.out
					.print("-------------------------------------------------\n"
							+ "   URL visited          (keywords on page itself)\n"
							+ "-------------------------------------------------\n");

			// crawl loop: fetch the current page, then pick the next URL
			do {
				index.readHTML(readingURL);

				if (urlLinks.isEmpty()) {
					break; // frontier exhausted
				}
				// FIFO for breadth-first, LIFO for depth-first
				readingURL = index.getBreadth() ? urlLinks.removeFirst()
						: urlLinks.removeLast();
			} while (visitedLinks.size() < index.getMaxPage());

			// fold anchor words into the index, only for pages we visited
			for (String url : visitedLinks) {
				if (!index.aTagAnchor.containsKey(url)) {
					continue;
				}
				for (String addingWord : index.aTagAnchor.get(url)) {
					Hashtable<String, Integer> smallTbl = index.hashTbl
							.get(addingWord);
					if (smallTbl == null) {
						// anchor word never indexed on any visited page;
						// skip it instead of crashing with an NPE
						continue;
					}
					if (smallTbl.containsKey(url)) {
						smallTbl.put(url, smallTbl.get(url) + 1);
					}
					else {
						smallTbl.put(url, 1);
						urlList.put(url, urlList.get(url) + 1);
					}
					index.hashTbl.put(addingWord, smallTbl);
				}
				index.aTagAnchor.remove(url);
			}

			System.out.printf("Searched %d web pages and found %d distinct "
					+ "keywords.\n", visitedLinks.size(), index.hashTbl.size());
			System.out.println("In the keyword index:");

			System.out
					.print("-------------------------------------------------\n"
							+ "   URL visited          (total keywords)\n"
							+ "-------------------------------------------------\n");

			for (String str : urlList.keySet()) {
				System.out.println(str + " (" + urlList.get(str) + ")");
			}

			index.writeFile(index.getHashTbl(), args[3]);

		} catch (MalformedURLException m) {
			// start URL rejected while crawling
			System.out.printf(ERR_URL, args[0]);
			System.out.printf(MSG_USAGE, Index.class.getName());
		} catch (IOException e) {
			// network or file failure
			System.out.printf(ERR_URL, args[0]);
			System.out.printf(MSG_USAGE, Index.class.getName());
		}
	}

	/**
	 * Returns the keyword index: distinct word -> (URL -> hit count).
	 * 
	 * @return the hashtable holding the distinct words
	 */
	public Hashtable<String, Hashtable<String, Integer>> getHashTbl() {
		return hashTbl;
	}

	/**
	 * Fetches and parses one page, unless it was already visited. The
	 * parser callback fills hashTbl/aTagAnchor as a side effect; on success
	 * the page is recorded in visitedLinks/urlList and its outgoing hrefs
	 * are appended to the frontier. Any failure (bad URL, unreachable host,
	 * parse error) is deliberately swallowed so that one broken link cannot
	 * abort the whole crawl.
	 * 
	 * @param urlLink the absolute URL to fetch
	 * @throws MalformedURLException declared for API compatibility
	 * @throws IOException declared for API compatibility
	 */
	private void readHTML(String urlLink) throws MalformedURLException,
			IOException {

		if (visitedLinks.contains(urlLink)) {
			return; // never fetch the same page twice
		}

		try {
			URL myURL = new URL(urlLink);
			NewParserCallBack myParser = new NewParserCallBack(myURL, hashTbl,
					aTagAnchor);

			// try-with-resources closes the stream even when the parser
			// throws (the original leaked the reader on every call)
			try (Reader myReader = new BufferedReader(new InputStreamReader(
					myURL.openStream()))) {
				new ParserDelegator().parse(myReader, myParser, true);
			}

			System.out.println(urlLink + " " + "(" + myParser.wordCount + ")");
			visitedLinks.add(urlLink);

			urlList.put(urlLink, myParser.wordCount);
			urlLinks.addAll(myParser.getHrefList());
		} catch (Exception ignored) {
			// best-effort crawl: skip pages that cannot be fetched or parsed
		}
	}

	/**
	 * Serializes the keyword index to the named file. Using
	 * try-with-resources also fixes the original close-order bug, where the
	 * buffered stream was closed before the ObjectOutputStream could flush
	 * its remaining bytes into it.
	 * 
	 * @param hashTbl the keyword index to serialize
	 * @param fileName the name of the output file
	 */
	public void writeFile(
			Hashtable<String, Hashtable<String, Integer>> hashTbl,
			String fileName) {
		try (ObjectOutputStream oos = new ObjectOutputStream(
				new BufferedOutputStream(new FileOutputStream(fileName)))) {
			System.out.printf("Writing keyword index in file %s... Done \n",
					fileName);
			oos.writeObject(hashTbl);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * Returns the maximum number of pages the crawl may visit.
	 * 
	 * @return the page limit parsed from args[2]
	 */
	public int getMaxPage() {
		return maxPage;
	}

	/**
	 * Reports the configured search order.
	 * 
	 * @return true for breadth-first, false for depth-first
	 */
	public boolean getBreadth() {
		return breadth;
	}

}
