//Steve Hipolito, Daniel Beraun - Project 1

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.LinkedList;
import java.util.Queue;

/**
 * Retrieves web pages from a queue of URLs and hands each page's source text
 * to a {@link RunnablePageParser}. Intended to run on its own thread: other
 * threads enqueue URLs via {@link #addURLStringToQueueToScan(String)} and
 * request shutdown via {@link #stop()}.
 */
public class RunnablePageRetriever implements Runnable {

	/** Analyzer whose shared crawl timer is started on the first scan. */
	private final RunnablePageAnalyzer rpa;

	/** Parser that receives each page's text; wired in via {@link #setRunnableParser}. */
	private RunnablePageParser rpp;

	/** Maximum number of pages this retriever will accept for scanning. */
	private final int limit;

	/** Number of URLs accepted so far; never exceeds {@code limit}. */
	private int runs = 0;

	/** Set from another thread by {@link #stop()}; volatile so run() sees the write. */
	private volatile boolean stop = false;

	/** The crawl's starting URL, forwarded to the parser with every page. */
	private final String mainURL;

	/** Queue of links (as strings) found on the web page. */
	private final Queue<String> links = new LinkedList<String>();

	/**
	 * @param the_rpa analyzer that owns the shared crawl timer
	 * @param limit   maximum number of pages to retrieve
	 * @param main    the crawl's starting URL
	 */
	public RunnablePageRetriever(final RunnablePageAnalyzer the_rpa, final int limit, final String main) {
		rpa = the_rpa;
		this.limit = limit;
		mainURL = main;
	}

	/** Wires in the parser that consumes retrieved page text. */
	public void setRunnableParser(final RunnablePageParser rpp) {
		this.rpp = rpp;
	}

	/**
	 * Queues a URL for scanning unless the page limit has already been reached.
	 * Synchronized because producers call this from other threads while the
	 * retriever thread drains the same (non-thread-safe) queue in scanPage().
	 *
	 * @param url the URL (as a string) to scan
	 */
	public synchronized void addURLStringToQueueToScan(final String url) {
		if (runs < limit) {
			runs++;
			links.add(url);
		}
	}

	/**
	 * Takes the head-of-queue URL, reads its source code into one string, and
	 * passes that string to the PageParser. Unreachable or malformed URLs are
	 * skipped (an empty page string is forwarded) so the crawl keeps going.
	 *
	 * @author Daniel Beraun, Steve Hipolito
	 */
	private synchronized void scanPage() {
		if (rpa.timerStart == 0) {
			rpa.timerStart = System.currentTimeMillis(); // start the shared crawl timer
		}

		final String urlString = links.peek();
		final StringBuilder pageWords = new StringBuilder();
		// try-with-resources guarantees the stream is closed even on failure;
		// UTF-8 is specified so decoding does not depend on the platform charset.
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new URL(urlString).openStream(), StandardCharsets.UTF_8))) {
			String line;
			while ((line = br.readLine()) != null) {
				pageWords.append(line);
			}
		} catch (MalformedURLException e) {
			// Bad link harvested from a page — skip it, keep crawling.
		} catch (IOException e) {
			// Unreachable page (404, timeout, ...) — skip it, keep crawling.
		}

		rpp.addStringAndURLToQueueToParse(pageWords.toString(), urlString, mainURL);
		links.remove();
	}

	/**
	 * Drains the queue, scanning each queued page, until {@link #stop()} is
	 * called. Yields between polls so an empty queue does not busy-spin at
	 * full CPU.
	 */
	public void run() {
		while (!stop) {
			// while there are still links needed to be scanned
			while (!links.isEmpty()) {
				scanPage();
			}
			Thread.yield(); // back off while waiting for more links
		}
	}

	/** @return true when there are no more links waiting to be scanned */
	public boolean done() {
		return links.isEmpty();
	}

	/** Signals the retriever thread to exit its run() loop. */
	public void stop() {
		stop = true;
	}
}
