package traverser;

import java.util.HashMap;

public class KeywordTraverser extends TrieTraverser {

	// TODO: refactor this - "visit" currently refers both to the initial prioritization,
	// 	and to later returning to the link and operating on it.
	protected PriorityParentQueue toVisit;  // pages whose priority is known but whose links are not yet exhausted
	protected KeyTracker tracker;           // scores link keywords against the target page

	// Set by computePriority() when the end page is seen among a page's links.
	private boolean traverseComplete = false;
	// When true, per-page progress output is suppressed; set from traverse(boolean).
	// FIX: the quiet parameter of traverse(boolean) was previously accepted but ignored.
	private boolean quiet = false;

	/**
	 * @param startPage URL of the page traversal begins from
	 * @param endPage   URL of the page being searched for
	 */
	public KeywordTraverser (String startPage, String endPage) {
		super(startPage, endPage);
		toVisit = new PriorityParentQueue();
		tracker = new KeyTracker(endPage);
	}

	/**
	 * Compute the keyword priority of a page: visit it (through the cache), sum the
	 * tracker's score of every keyword of every outgoing link, and normalize by the
	 * number of links so that large pages aren't disproportionately attractive.
	 *
	 * Side effect: sets {@code traverseComplete} if the end page appears among the links.
	 */
	private int computePriority (String page) {

		// The only way to get a page's priority is to actually read its links.
		// A CacheLinkProcessor keeps already-seen pages in memory instead of
		// going back out to the server for them.
		CacheLinkProcessor lp = new CacheLinkProcessor(page);
		// Note: duplicate links matter to the score, and order doesn't.
		int priority = 0;
		int numLinks = 0;

		for (String link : lp.getLinks()) {
			if (link.equals(endPage)) { // The target is one hop away
				traverseComplete = true;
			}
			numLinks++;

			for (String key : KeyTracker.splitAndReduce(link)) {
				priority += tracker.priorityOf(key);
			}
		}

		// Scale first so the subsequent integer division keeps some granularity,
		// then normalize by link count (guarding against division by zero).
		priority *= 100;
		if (numLinks != 0) priority /= numLinks;
		else priority = 0;

		if (!quiet) System.out.println(page + "\t\tpriority " + priority);
		return priority;
	}

	/** Traverse with progress output enabled. */
	public int traverse() {
		return traverse(false);
	}

	/**
	 * Run a best-first search from startPage toward endPage, ordering pages by
	 * keyword priority.
	 *
	 * Pages are added to the priority queue only AFTER they have been visited,
	 * because visiting is the only way to learn a page's priority. A page stays
	 * in the queue until all of its links have been examined (exhausted); since
	 * it also remains in the visited trie, it is never examined twice.
	 *
	 * @param quiet when true, suppress per-page progress output
	 * @return the number of pages visited when the end page was found, or -1 if
	 *         the queue was exhausted without finding it
	 */
	public int traverse(boolean quiet) {
		this.quiet = quiet;

		if (!quiet) {
			System.out.println("Start page: " + startPage);
			System.out.println("End   page: " + endPage);
			System.out.println("Traversing...");
		}

		int count = 0;  // Count of visited pages
		if (endPage.equals(startPage)) {
			return count;
		}

		count++; // The first page counts

		visited.add(startPage, null);
		int startPriority = computePriority(startPage);

		// FIX: if the start page links directly to the end page, stop here and record
		// the correct parent. Previously this case fell through to the main loop,
		// which mis-attributed the end page's parent to whichever link of the start
		// page happened to be examined first, corrupting the reported path.
		if (traverseComplete) {
			visited.add(endPage, startPage);
			return count;
		}

		toVisit.add(startPage, null, startPriority);

		// Idea: pop the best page off the queue, read all of its links; if the target
		// is among them, stop; otherwise queue the links and repeat.
		while (toVisit.hasNext()) {
			int currentPriority;
			String currentPage;

			// Pop pages until we find one whose queued priority is still accurate.
			while (true) {
				currentPriority = toVisit.topPriority();
				currentPage = toVisit.pop();
				if (count < 100) {  // early on, trust the queued priority as-is
					break;
				}
				// NOTE(review): priorities are recomputed here, presumably because the
				// tracker's key scores can drift as traversal proceeds - confirm.
				int adjustedPriority = computePriority(currentPage);

				if (adjustedPriority != currentPriority) {
					// Stale priority: requeue with the fresh value and pop again.
					toVisit.add(currentPage, null, adjustedPriority);
				} else {
					break;
				}
			}

			CacheLinkProcessor lp = new CacheLinkProcessor(currentPage);
			lp.removeDuplicates();
			lp.knuthShuffle();	// For now, at least, we'll add links to the queue in a random order
			//TODO: come up with a way to sort links by name priority, or something like that

			if (!quiet) System.out.println(count + ": Examining " + currentPage + ", priority " + currentPriority + ", " + lp.count() + " unique links");

			// Add links to the queue until one has a higher priority than the current
			// page (which in turn should have the highest overall priority).
			for (String link : lp.getLinks()) {

				if (toVisit.topPriority() > currentPriority) {
					// We know of a better page to look at than this one, so we'll go there
					break;
				}

				// Don't even look at pages we've already been to.
				if (visited.contains(link)) {
					continue;
				}

				int linkPriority = computePriority(link);
				count++;
				// Computing the priority of link required visiting it, so record it.
				visited.add(link, currentPage);

				if (traverseComplete) {
					// computePriority(link) saw the end page among link's own links,
					// so link really is the end page's parent here.
					visited.add(endPage, link);
					return count;
				}

				toVisit.add(link, currentPage, linkPriority);

			}

			// Either currentPage was exhausted, or we found a higher-priority page.
			if (toVisit.topPriority() > currentPriority) {
				if (!quiet) System.out.println("Found higher priority");
				// currentPage is not exhausted, so we may want to come back to it later.
				toVisit.add(currentPage, visited.parentOf(currentPage), currentPriority);
			} else {
				if (!quiet) System.out.println("Exhausted page");
			}

		}

		// If we get here, either we have a bug or wikipedia has a closed loop.
		System.out.println("Looks like we exhausted wikipedia.");

		return -1;

	}

	/**
	 * Command-line entry point. With two arguments, traverses from
	 * wiki/args[0] to wiki/args[1]; otherwise uses a default pair.
	 *
	 * @param args optional start and end page names (appended to the wiki base URL)
	 */
	public static void main(String[] args) {
		String start = "http://en.wikipedia.org/wiki/Purdue";
		String end = "http://en.wikipedia.org/wiki/Computer_Science";
		if (args.length >= 2) {
			start = "http://en.wikipedia.org/wiki/" + args[0];
			end = "http://en.wikipedia.org/wiki/" + args[1];
		}

		KeywordTraverser test = new KeywordTraverser(start, end);
		System.out.println("Found target in " + test.traverse() + " steps.");
		test.printMetrics();
		test.printPath();

	}

}
