/* Liviu Patrasco & Holly Beach
 * TCSS422 Project 1: Web Crawler
 * February 6, 2012
 */
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.SocketTimeoutException;
import java.net.URL;
import java.net.URLConnection;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

/** The MThreadedPageRetriever acts as a consumer of URL strings and a
 *  producer of raw retrieved page data. It polls a {@code BlockingQueue}
 *  of URLs, fetches each unseen text/html or text/plain page, and places
 *  the raw document on an output queue. When it stops (quota reached,
 *  queue exhausted, error, or interrupt) it enqueues a sentinel
 *  {@code RawPage} so the downstream parser knows why.
 *  Designed to run on its own thread in a multi-threaded environment.
 * @author Liviu and Holly
 */
public class MThreadedPageRetriever implements Runnable {
	/** Sentinel pages placed on the output queue to tell the consumer why we stopped. */
	public static final RawPage NO_MORE_DOCS_MARKER = new RawPage("X", "");
	public static final RawPage NO_MORE_URLS_MARKER = new RawPage("NOT ENOUGH URLS", "");
	public static final RawPage COMM_ERROR_MARKER = new RawPage("COMM ERROR", "");
	public static final RawPage INTERRUPTED_MARKER = new RawPage("INTERRUPTED", "");

	// String form of the "no more urls" signal as it would appear on the URL queue.
	// NOTE(review): the original compared the String url against the RawPage
	// NO_MORE_URLS_MARKER, which String.equals can never match (always false),
	// making that branch dead code. This sentinel restores the apparent intent —
	// confirm against whatever the URL producer actually enqueues.
	private static final String NO_MORE_URLS_SENTINEL = "NOT ENOUGH URLS";

	/** Connect/read timeout in milliseconds (10 seconds). */
	private static final int TIMEOUT_MS = 10000;

	/** How long to wait for a URL before checking whether work remains. */
	private static final long POLL_MS = 500;

	private final BlockingQueue<String> url_q;             // consumed: candidate URLs
	private final BlockingQueue<RawPage> retrieved_docs_q; // produced: raw page bodies
	// De-duplication of already-fetched URLs; HashSet gives O(1) lookups
	// (the original used List.contains, an O(n) scan per URL).
	private final Set<String> retrieved_urls = new HashSet<String>();
	private int total_pages_to_retrieve;
	// volatile: stopExecution() is called from another thread, so the flag
	// must be visible to the run() loop without synchronization.
	private volatile boolean early_termination = false;
	private boolean comm_error = false;
	private boolean interrupted = false;

	/**
	 * Creates a retriever that consumes from {@code urls} and produces onto
	 * {@code ret_docs}.
	 *
	 * @param urls queue of URL strings to fetch
	 * @param ret_docs queue receiving retrieved raw documents
	 * @param num_pages maximum number of pages to retrieve
	 */
	public MThreadedPageRetriever (final BlockingQueue<String> urls,
			final BlockingQueue<RawPage> ret_docs, final int num_pages) {
		url_q = urls;
		retrieved_docs_q = ret_docs;
		total_pages_to_retrieve = num_pages;
	}

	/**
	 * Requests early termination of the retrieval loop (thread-safe).
	 *
	 * @param stopped true to stop retrieving as soon as possible
	 */
	public void stopExecution(boolean stopped){
		early_termination = stopped;
	}

	@Override
	public void run() {
		while (total_pages_to_retrieve > 0 && !early_termination) {
			try{
				final String url_str = url_q.poll(POLL_MS, TimeUnit.MILLISECONDS);
				if (url_str == null){
					if(retrieved_docs_q.isEmpty()){ //stop retrieving, no urls left.
						early_termination = true;
						break;
					}
					continue; //more urls may become available
				}
				if (url_str.isEmpty()) {
					//Url string is an empty string, just skip it
					continue;
				}
				if (url_str.equals(NO_MORE_URLS_SENTINEL)) {
					break;
				}
				if (!retrieved_urls.contains(url_str)) {  //only retrieve "unseen" pages
					retrievePage(url_str);
				}
			} catch (InterruptedException e){
				//re-assert interrupt status so owning code can observe it too
				Thread.currentThread().interrupt();
				interrupted = true;
				break;
			} catch (UnknownHostException e) {
				comm_error = true;
				break;
			} catch (SocketTimeoutException e) {
				comm_error = true;
				break;
			} catch (IOException e) {
				continue; //read error on this URL only; go to next URL
			}
		}
		//place appropriate end marker in queue to let parser know we are done
		if (early_termination)
			retrieved_docs_q.add(NO_MORE_URLS_MARKER);
		else if(comm_error)
			retrieved_docs_q.add(COMM_ERROR_MARKER);
		else if(interrupted)
			retrieved_docs_q.add(INTERRUPTED_MARKER);
		else
			retrieved_docs_q.add(NO_MORE_DOCS_MARKER);
	}

	/**
	 * Fetches one page; on success enqueues its body and decrements the
	 * remaining-page count. Only text/html and text/plain content is kept.
	 * Line separators are dropped while concatenating (preserves original
	 * behavior — presumably the parser does not need them; confirm).
	 * NOTE(review): InputStreamReader uses the platform default charset here,
	 * ignoring the response's declared encoding — preserved from the original.
	 *
	 * @param url_str the URL to fetch
	 * @throws IOException on connect/read failure (handled by the caller)
	 */
	private void retrievePage(final String url_str) throws IOException {
		URL url = new URL(url_str);
		URLConnection connection = url.openConnection();
		//set communication timeouts to 10 seconds
		connection.setConnectTimeout(TIMEOUT_MS);
		connection.setReadTimeout(TIMEOUT_MS);
		BufferedReader in = new BufferedReader(
				new InputStreamReader(connection.getInputStream()));
		try {
			//only retrieve HTML or Text documents
			String contentType = connection.getContentType();
			if (contentType == null
					|| (!contentType.contains("text/html")
							&& !contentType.contains("text/plain"))) {
				return; //non-text document: skip (reader closed in finally — was leaked before)
			}
			StringBuilder doc = new StringBuilder();
			String inputLine;
			while ((inputLine = in.readLine()) != null) {
				doc.append(inputLine);
			}
			retrieved_docs_q.add(new RawPage(doc.toString(), url_str));
			retrieved_urls.add(url_str);
			total_pages_to_retrieve--;
		} finally {
			in.close(); //always release the connection's stream
		}
	}
}
