/* Liviu Patrasco & Holly Beach
 * TCSS422 Project 1: Web Crawler
 * February 6, 2012
 */
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.SocketTimeoutException;
import java.net.URL;
import java.net.URLConnection;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.Set;

/** The PageRetriever acts as a consumer of
 *  URL strings and a producer of raw retrieved URL data.
 *  It is designed to work in a single-threaded environment.
 *  @author Liviu and Holly
 */
public class PageRetriever {
	// Pages successfully fetched, waiting to be consumed by a parser.
	private Queue<RawPage> retrieved_docs_q = new LinkedList<RawPage>();
	// URLs already fetched; a Set gives O(1) duplicate checks
	// (was a List with an O(n) contains() scan per candidate URL).
	private Set<String> retrieved_urls = new HashSet<String>();
	// URLs still to be visited, in FIFO order.
	private Queue<String> url_q = new LinkedList<String>();
	// Remaining page budget; decremented per successful retrieval.
	private int total_pages_to_retrieve;
	// Set once a host-unreachable or timeout error aborts retrieval.
	private boolean comm_error = false;

	public PageRetriever () {
		//nothing to do
	}

	/** Enqueue a single URL for later retrieval. */
	public void inQueueLink(final String link) {
		url_q.add(link);
	}

	/** @return the next retrieved page, or null if none are pending. */
	public RawPage retrievedDocsQPoll() {
		return retrieved_docs_q.poll();
	}

	/** @return true if no retrieved pages are waiting to be consumed. */
	public boolean retrievedDocsQIsEmpty() {
		return retrieved_docs_q.isEmpty();
	}

	/**
	 * Set the retrieval budget.
	 * @param n_pages to retrieve
	 */
	public void setTotalPages(final int n_pages) {
		total_pages_to_retrieve = n_pages;
	}

	/**
	 * Drain the URL queue, fetching each new text/html or text/plain page
	 * until the queue is empty or the page budget is exhausted. Unknown-host
	 * and timeout errors abort retrieval and set the comm_error flag; other
	 * I/O errors just skip the offending URL (a malformed URL is an
	 * IOException, so it is skipped too, as before).
	 * @return the number of pages still left in the budget
	 */
	public int retrieve() {
		while (!url_q.isEmpty() && total_pages_to_retrieve > 0) {
			String url_str = url_q.poll();
			if (retrieved_urls.contains(url_str)) {
				continue; //already fetched; go to next URL
			}
			BufferedReader in = null;
			try {
				URL url = new URL(url_str);
				URLConnection connection = url.openConnection();
				//set communication timeouts to 10 seconds
				connection.setConnectTimeout(10000);
				connection.setReadTimeout(10000);
				//only retrieve html or text pages; the null check must guard
				//BOTH contains() calls (the original `a && b || c` precedence
				//let a null content type reach contains() and throw NPE)
				String content_type = connection.getContentType();
				if (content_type != null
						&& (content_type.contains("text/html")
								|| content_type.contains("text/plain"))) {
					//open the stream only after the type check so non-text
					//responses never leave an unclosed reader behind
					in = new BufferedReader(
							new InputStreamReader(connection.getInputStream()));
					String inputLine;
					StringBuilder doc = new StringBuilder();
					while ((inputLine = in.readLine()) != null) {
						doc.append(inputLine);
					}
					retrieved_docs_q.add(new RawPage(doc.toString(), url.toString()));
					retrieved_urls.add(url.toString());
					total_pages_to_retrieve--;
				}
			} catch (UnknownHostException e) {
				//unable to connect: give up on this crawl
				comm_error = true;
				break;
			} catch (SocketTimeoutException e) {
				//socket timeout: give up on this crawl
				comm_error = true;
				break;
			} catch (IOException e) {
				continue; //unreadable page; go to next URL
			} finally {
				//close the reader on every path (the original leaked it on
				//IOException and on non-text content types)
				if (in != null) {
					try {
						in.close();
					} catch (IOException ignored) {
						//best-effort close; nothing useful left to do
					}
				}
			}
		}

		return total_pages_to_retrieve;
	}

	/** @return the live queue of URLs still to be visited. */
	public Queue<String> getUrlQ(){
		return url_q;
	}

	/**
	 * Clear out vars
	 */
	public void reset() {
		retrieved_docs_q.clear();
		retrieved_urls.clear();
		url_q.clear();
	}

	/** Enqueue every URL in the given list for later retrieval. */
	public void inQueueLink(List<String> new_links) {
		for (String link : new_links) {
			inQueueLink(link);
		}
	}

	/** @return the number of pages still left in the budget */
	protected int pagesLeft() {
		return total_pages_to_retrieve;
	}

	/** @return true if a host-unreachable or timeout error occurred */
	public  boolean commErrorReceived(){
		return comm_error;
	}
}
