/*
 * Improbability Drive
 * Phillip Cardon, Thach Nguyen, Cristopher Claeys
 * 4/26/2011
 */

package background;
import java.io.IOException;
import java.net.URL;
import java.util.concurrent.atomic.AtomicInteger;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

import structures.Tuple;
import ui.Launcher;
import buffers.SynchronizedBuffer;
import buffers.URLsRetrieved;

/**
 * URLReader, code based on class retrieved from:
 * http://download.oracle.com/javase/tutorial/networking/urls/readingURL.html
 * @author Oracle
 * @author Phillip Cardon
 * @author Chris Claeys
 * @author Thach Nguyen
 * @author Jonathan Hedley - Contributed JSoup library
 */
public class PageRetriever extends Thread {
    //CLASS CONSTANTS
	/**
	 * Domain that must never be crawled; its URLs are marked as seen and
	 * dropped without being fetched.
	 */
	private static final String PROHIBITED = "questioneverything.typepad.com";
	
	//FIELDS
	/**
	 * Buffer containing URLs of pages which need to be retrieved.
	 */
	private final SynchronizedBuffer<URL> pageToRetrieveBuffer;
	
	/**
	 * Buffer to place retrieved pages.
	 */
    private final SynchronizedBuffer<Tuple<Document, URL>> pageBuffer;
    
    /**
     * Set of pages that have been retrieved.
     */
    private final URLsRetrieved visited;
    
    /**
     * Maximum number of pages to crawl, shared by every retriever thread.
     * volatile: written in the constructor, read from the crawl loop of
     * other threads.
     */
    private static volatile int limit;
    
    /**
     * Count of pages successfully retrieved so far, shared by every
     * retriever thread. AtomicInteger prevents the lost-update race that a
     * plain static {@code crawled++} would have under multiple threads.
     */
    private static final AtomicInteger crawled = new AtomicInteger();
    
    /**
     * Flag for thread state. volatile so that a {@link #kill()} issued from
     * another thread is seen promptly by the crawl loop.
     */
    private static volatile boolean alive;
    
    /**
     * Constructor. Note that {@code limit}, {@code alive} and
     * {@code crawled} are static, so constructing any retriever resets the
     * shared crawl state for all of them.
     * @param theRetrieveBuffer buffer for urls.
     * @param thePages buffer for pages.
     * @param theVisited pages visited.
     * @param toCrawl maximum number of pages to retrieve.
     */
    public PageRetriever(SynchronizedBuffer<URL> theRetrieveBuffer,
    		SynchronizedBuffer<Tuple<Document, URL>> thePages,
    				 URLsRetrieved theVisited, int toCrawl) {
    	pageToRetrieveBuffer = theRetrieveBuffer;
    	pageBuffer = thePages;
    	visited = theVisited;
    	limit = toCrawl;
    	alive = true;
    	crawled.set(0);
    }
    
    /**
     * Crawls until the shared page limit is reached or the thread is
     * killed, then asks the launcher to stop all worker threads.
     * Each iteration dequeues one URL (under the buffer's lock), fetches
     * and parses it with Jsoup, and enqueues the resulting page/URL pair.
     */
	@Override
	public void run() {
		URL u;
		
		do {	
			u = null;
			synchronized (pageToRetrieveBuffer) {
				if (pageToRetrieveBuffer.size() > 0) {
					u = pageToRetrieveBuffer.dequeue();	
					if (u.toString().contains(PROHIBITED)) {
						// NOTE(review): elsewhere visit(u) is used to mark a
						// URL as seen; confirm visited(u) also marks, or this
						// may have been meant to be visit(u).
						visited.visited(u);
						u = null;
					}
				}
			}
			if (u != null && !visited.visited(u)) { //skip null and already-visited URLs
				try {
					final Document page = Jsoup.connect(u.toString()).get();
					pageBuffer.enqueue(new Tuple<Document, URL>(page, u));
					visited.visit(u); //add url to visited
					crawled.incrementAndGet();
				} catch (IOException e) {
					// Unreachable or malformed page: report it and move on.
					System.err.println("Dead link " + u);
				}
			}
		} while (crawled.get() < limit && alive);
		Launcher.killThreads();
    } //end run()
	
	/**
	 * Method to kill this thread (the flag is shared, so every retriever
	 * thread's crawl loop will stop).
	 */
	public void kill()
	{
		alive = false;
	}

}
