import java.util.ArrayList;
import java.util.Hashtable;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
 * The PageParser picks up pages from the buffer and looks for links, putting any found into 
 * the queue.  
 * 
 * Note that HTTP pages may use relative addressing, e.g.,
 * href="../anotherdirectory/anotherpage.html", which would translate into
 * href="http://someplatform.xxx/directory/anotherdirectory/anotherpage.html".
 * 
 * NOTE(review): the paragraph below describes a pages_retrieved counter, but no such
 * counter exists in this class — presumably it is maintained by the crawler driver; confirm.
 * Use a pages_retrieved counter that can be initialized to an integer number input by the 
 * user as the total number of pages to retrieve before stopping. You can hard-code an 
 * absolute upper limit, say 10,000, to make sure the user doesn't try to crawl the whole web.
 */
public class PageParser implements Runnable {
	/**
	 * Buffer of pages retrieved and yet to be parsed.
	 */
	private PageBuffer pbuff;

	/**
	 * Buffer of URLs still to be retrieved.
	 */
	private PageToRetrieve rbuff;

	/**
	 * Page currently being parsed: (URL, page contents).
	 */
	private Pair<String, String> current;
	/**
	 * URLs found on the most recently parsed page.
	 */
	private ArrayList<String> urls;
	/**
	 * Number of word tokens on the most recently parsed page.
	 */
	private int numWordsThisPage;
	/**
	 * Time at which parsing of the current page started, in epoch millis.
	 */
	private long startTime;
	/**
	 * Keywords to be searched for.
	 */
	private List<String> keywords;

	/**
	 * Sink for per-page statistics.
	 */
	private DataGatherer dgatherer;

	/**
	 * Queue underlying the PageBuffer.
	 */
	private ConcurrentLinkedQueue<Pair<String, String>> pqueue;

	/**
	 * Queue underlying the PageToRetrieve buffer.
	 */
	private ConcurrentLinkedQueue<String> rqueue;

	// CONSTRUCTORS *************************************************
	/**
	 * Default constructor; private because a parser is unusable without its buffers.
	 */
	private PageParser() {}

	/**
	 * Builds a parser wired to the crawl's shared buffers.
	 * @param pagebuff buffer of (url, html) pairs awaiting parsing.
	 * @param retbuff buffer of URLs awaiting retrieval.
	 * @param datagatherer sink that collects per-page statistics.
	 */
	public PageParser(PageBuffer pagebuff, PageToRetrieve retbuff, DataGatherer datagatherer) {
		this();
		pbuff = pagebuff;
		rbuff = retbuff;
		rqueue = rbuff.queue();
		pqueue = pbuff.queue();
		current = new Pair<String, String>("", "");
		urls = new ArrayList<String>();
		//default if setKeys is never called
		keywords = new ArrayList<String>();
		dgatherer = datagatherer;
	}
    /**
     * Processes at most one page from the buffer: extracts its links, enqueues
     * unseen ones for retrieval, and reports keyword counts to the gatherer.
     */
    public void tick() { 
        if (pbuff.hasNext()) {
                startTime = System.currentTimeMillis();
                current = pqueue.poll();
                // hasNext() and poll() are not atomic on a ConcurrentLinkedQueue:
                // another consumer may drain the queue in between, so poll() can
                // legitimately return null here. Bail out rather than NPE.
                if (current == null) return;
                urls = parseURLs(current.val2());
                for (String s : urls) {
                	// skip URLs already seen and a known crawler-trap domain
                	if (!rbuff.seenURL(s) && !s.contains("questioneverything.typepad.com")){
                		rqueue.add(s);
                	}
                }
                DataObj temp = new DataObj(count(current.val2(), keywords), current.val1(),
                		urls.size(), numWordsThisPage, System.currentTimeMillis() - startTime);
                dgatherer.addObj(temp);
        }
    }

    /**
     * @return the URLs found on the most recently parsed page.
     */
    public ArrayList<String> urls(){
    	return urls;
    }

	/**
	 * Sets keywords for threads to search for.
	 * @param keys Keywords to search for.
	 */
	public void setKeys(List<String> keys) {
		this.keywords = keys;
	}
	/**
	 * Parses an entire input string looking for any of the specified search keys.
	 * Also records the page's token count in {@code numWordsThisPage} as a side effect.
	 * @param input String to count.
	 * @param keys What to look for.
	 * @return Hashtable containing the provided keys with values representing how many times 
	 * 			they were found.
	 */
	private Hashtable<String, Integer> count(String input, List<String> keys) {
		Hashtable<String, Integer> ht = new Hashtable<String, Integer>(keys.size());
		numWordsThisPage = 0;
		for(String s : keys) ht.put(s, 0); //init to 0
		//tons of delims to handle potential html input, more probably needed
		String delims = "[ .,?!:;\"<>'\\/=()]+"; //regex to split on punctuation or space, multiple delims together treated as one
		String[] tokens = input.split(delims);
		numWordsThisPage = tokens.length;
		// the caller may share this keyword list across threads, so guard iteration
		synchronized(keys) {
			for(String s : tokens) {
				if (keys.contains(s))
					ht.put(s, ht.get(s) + 1);
			}
		}
		return ht;
	}

	/**
	 * Parses an input string looking for new URL links.
	 * @param html markup to be parsed.
	 * @return ArrayList of unique, absolute URLs found.
	 */
	private ArrayList<String> parseURLs(String html) {
		ArrayList<String> urls = new ArrayList<String>();
		int index = 0;
		String substr = "";
		Pair<String, Integer> temp = new Pair<String, Integer>("", 0);
		while (true) {		
			temp = getEntireTag(html, index);
			substr = temp.val1();
			index = temp.val2();
			//no complete tags left, quit running
			if (substr.equals("")) break;
			substr = getURL(substr);
			//that tag was malformed or not html/htm/txt, dont add
			//also dont want to add links we have already seen
			if (substr.equals("") || urls.contains(substr)) continue;
			//relative link: resolve it against the current page's URL
			if (!substr.contains("http")) {
				substr = contextual(substr);
			}

			urls.add(substr);
		}
		return urls;
	}
	/**
	 * Resolves a relative URL against the URL of the page currently being parsed.
	 * @param url relative URL to be resolved.
	 * @return The absolute URL.
	 */
	private String contextual(String url) {
		//Parses out for '../' contextuals
		String substr = url.replace("../", "");
		String temp2 = current.val1();
		// one directory level is stripped from the base for each "../" removed
		int count = (url.length() - substr.length()) / 3;
		for(int i = 0; i < count; i++) {
			int j = temp2.lastIndexOf('/');
			temp2 = temp2.substring(0, j);
			j = temp2.lastIndexOf('/');
			temp2 = temp2.substring(0, j + 1);
		}
		//Special case: "pages/" links resolve against the site root, not the current directory.
		if (substr.contains("pages/")) {
			// BUG FIX: the replace() result was previously discarded (Strings are
			// immutable), so the "pages/" prefix was never actually stripped.
			substr = substr.replace("pages/", "");

			temp2 = temp2.replace("http://", "");
			temp2 = temp2.substring(0, temp2.indexOf('/') + 1);
			temp2 = "http://" + temp2;
		}

		// guard: a URL that reduced to nothing has no path component to append
		if (substr.isEmpty()) return temp2;

		//Combines the url back together.
		int i = temp2.lastIndexOf('/');
		temp2 = temp2.substring(0, i+1);
		if (substr.charAt(0) == '/') substr = substr.substring(1);
		substr = temp2 + substr;
		return substr;
	}

	/**
	 * Gets an entire &lt;a href= &gt; &lt;/a&gt; tag.
	 * @param html The html to search.
	 * @param startIndex The index to start searching at.
	 * @return Pair of (entire tag, index just past the closing tag) for the next
	 * 			iteration, or (empty string, -1) if no complete tag exists.
	 */
	private Pair<String, Integer> getEntireTag(String html, int startIndex) {
		String ret = "";
		int next = -1;
		int index = html.indexOf("<a", startIndex);
		if (index != -1) {
			int endTag = html.indexOf("</a>", index);
			if (endTag != -1) {
				ret = html.substring(index, endTag + 4).trim();
				next = endTag + 4; // resume scanning past the closing tag
			}
		}
		return new Pair<String, Integer>(ret, next);
	}

	/**
	 * Extracts the href target from an anchor tag.
	 * @param tag complete anchor tag, e.g. {@code <a href="x.html">text</a>}.
	 * @return the quoted URL with any fragment ("#...") stripped, or the empty
	 * 			string if the tag is malformed or the target is not html/htm/txt.
	 */
	private String getURL(String tag) {
		String ret = "";
		int startIndex = tag.indexOf("\"");
		int endIndex = tag.indexOf("\"", startIndex + 1);
		if (startIndex != -1 && endIndex != -1) {
			ret = tag.substring(startIndex + 1, endIndex).trim();
			// drop any fragment identifier; crawling "#..." revisits the same page
			startIndex = ret.indexOf("#");
			if (startIndex > 0) ret = ret.substring(0, startIndex);
			String[] tokens = ret.split("[.]");
			if (tokens.length >= 1) {
				String extension = tokens[tokens.length - 1].trim();
				if (!extension.equals("html") && !extension.equals("htm") && !extension.equals("txt"))
					ret = "";
			}
		}
		return ret;
	}

	/**
	 * Runs one parsing pass; intended to be invoked repeatedly by the crawl driver.
	 */
	@Override
	public void run() {
		tick();		
	}
}
