//Steve Hipolito, Daniel Beraun - Project 1

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.LinkedList;
import java.util.Queue;

/**
 * Fetches the raw source of web pages from a queue of URLs and hands the
 * text to a {@link PageParser} for link/word extraction. Crawling stops
 * once {@code limit} pages have been retrieved.
 */
public class PageRetriever {
	/** Analyzer that accumulates crawl statistics (also owns the crawl timer). */
	private PageAnalyzer pa;
	/** Parser that extracts links and words from fetched page text. */
	private PageParser pp;
	/** Maximum number of pages to retrieve. */
	private int limit;
	/** Number of pages retrieved so far. */
	private int runs = 0;
	/** Starting URL of the crawl, passed through to the parser. */
	private String mainURL;
	
	/** Queue of links (as strings) found on the web page. */
	private Queue<String> links = new LinkedList<String>();

	/**
	 * Creates a retriever bound to the given analyzer.
	 *
	 * @param the_pa analyzer to record results into
	 * @param limit  maximum number of pages to retrieve
	 * @param main   starting URL of the crawl
	 */
	public PageRetriever(final PageAnalyzer the_pa, final int limit, final String main) {
		// NOTE(review): 'this' escapes to PageParser before construction finishes;
		// safe only as long as PageParser does not call back into this instance
		// from its constructor — verify against PageParser.
		pp = new PageParser(this, the_pa);
		pa = the_pa;
		this.limit = limit;
		mainURL = main;
	}
	
	/** @return true if there are no more URLs queued for scanning. */
	public boolean isLinksQueueEmpty() {
		return links.isEmpty();
	}
	
	/**
	 * Queues a URL (as a string) to be scanned by a later {@link #scanPage()} call.
	 *
	 * @param url the URL string to enqueue
	 */
	public void addURLStringToQueueToScan(String url) {
		links.add(url);
	}

	/**
	 * Takes the input URL (as a string) and puts the source code into one
	 * string. The PageParser parses through that string. Doesn't run if number
	 * of pages crawled is past the limit.
	 * 
	 * @throws IOException
	 * @author Daniel Beraun, Steve Hipolito
	 */
	public void scanPage() throws IOException {
		if (pa.timerStart == 0) {
			pa.timerStart = System.currentTimeMillis(); // start timer on first scan
		}

		// Guard instead of letting remove() throw NoSuchElementException
		// on an empty queue.
		if (links.isEmpty()) {
			return;
		}
		final String urlString = links.remove();

		if (runs >= limit) {
			return; // crawl limit reached; the URL is still dequeued, as before
		}
		runs++;

		// StringBuilder avoids the O(n^2) cost of repeated string concatenation.
		final StringBuilder pageWords = new StringBuilder();

		if (urlString != null) { // explicit guard replaces the old catch (NullPointerException)
			try {
				final URL url = new URL(urlString);
				// try-with-resources closes the reader even on failure
				// (the original leaked it). UTF-8 is assumed for page
				// content — TODO confirm; the original used the platform
				// default charset.
				try (BufferedReader br = new BufferedReader(
						new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
					String line;
					while ((line = br.readLine()) != null) {
						// readLine() strips the terminator; re-insert a newline so
						// words on adjacent lines are not fused together.
						pageWords.append(line).append('\n');
					}
				}
			} catch (IOException ignored) {
				// Best effort: MalformedURLException and FileNotFoundException are
				// both IOException subtypes. An unreachable or malformed URL is
				// treated as an empty page rather than aborting the whole crawl,
				// matching the original behavior.
			}
		}

		pp.addStringAndURLToQueueToParse(pageWords.toString(), urlString, mainURL);
		pp.parse();
	}
}
