/*
 * Winter 2014 
 * TCSS 422 - Computer Operating System
 * Project1 - Web Crawler
 */

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

/**
 * The PageRetriever class retrieves web pages and stores them for later
 * analysis by the PageParser class. Only URLs specifying HTML or text
 * documents are retrieved, and each unique URL is retrieved only once. This
 * class uses a collection of URLs waiting to be retrieved, as well as a
 * repository in which to store the documents' contents.
 * 
 * @author Mayuri mayuri@uw.edu
 * @version Winter 2014
 * 
 */
public class PageRetriever implements Runnable {

    /**
     * Queue of URLs waiting to be retrieved. Populated by addURL(), which
     * the PageParser calls as it discovers links.
     */
    private final BlockingQueue<String> myUrlRetrievalQueue;

    /**
     * Queue of retrieved page contents handed off to the PageParser.
     */
    private final BlockingQueue<PageContent> myPagesParserQueue;

    /**
     * URLs already claimed for retrieval. Wrapped in a synchronized set
     * because addURL() may run on the parser's thread while the retriever
     * thread marks pages visited.
     */
    private final Set<String> myUrlList =
            Collections.synchronizedSet(new HashSet<String>());

    /**
     * The maximum number of pages to retrieve.
     */
    private final int myMaxPages;

    /**
     * True once this retriever has finished or forceKill() was invoked.
     * Volatile so a stop request issued from another thread is visible
     * to the run() loop.
     */
    private volatile boolean complete = false;

    /**
     * Constructs a PageRetriever seeded with the given initial URL.
     *
     * @param theInitialUrl the first URL to crawl; must be valid and reachable.
     * @param theMaxPages the maximum number of pages to retrieve.
     * @param theUrlRetrievalQueue queue of URLs awaiting retrieval.
     * @param thePagesParserQueue queue of retrieved pages awaiting parsing.
     * @throws IllegalArgumentException if theInitialUrl is malformed or unreachable.
     */
    public PageRetriever(final String theInitialUrl, final int theMaxPages,
                         final BlockingQueue<String> theUrlRetrievalQueue,
                         final BlockingQueue<PageContent> thePagesParserQueue) {
        // Validate before touching any state so a bad URL leaves nothing queued.
        if (!isValidURL(theInitialUrl)) {
            throw new IllegalArgumentException(
                    "Cannot start with an invalid URL. " + theInitialUrl);
        }
        this.myUrlRetrievalQueue = theUrlRetrievalQueue;
        this.myPagesParserQueue = thePagesParserQueue;
        this.myMaxPages = theMaxPages;
        addURL(theInitialUrl);
    }

    /**
     * Drains the URL queue, retrieving each page, until forceKill() is
     * invoked or the thread is interrupted.
     *
     * <p>BUG FIX: the previous implementation performed a single 1 ms poll
     * and returned, so the worker thread exited almost immediately,
     * isComplete() stayed false forever, and forceKill() had no effect.</p>
     */
    @Override
    public void run() {
        try {
            while (!complete) {
                retrieve();
            }
        } catch (final InterruptedException e) {
            // Restore the interrupt status so the thread's owner can observe it.
            Thread.currentThread().interrupt();
        } finally {
            // Whatever ended the loop, the retriever is now done.
            complete = true;
        }
    }

    /**
     * Reports whether this retriever has finished running.
     *
     * @return true once run() has exited or forceKill() was invoked.
     */
    public boolean isComplete() {
        return complete;
    }

    /**
     * Asks the retriever to stop; the run() loop exits after its current poll.
     */
    public void forceKill() {
        complete = true;
        // NOTE(review): the parser is not notified of the stop request;
        // confirm whether a poison-pill on the parser queue is needed.
    }

    /**
     * Polls the URL queue once and, if a URL is available, retrieves the
     * corresponding web page. The queue is populated by the page parser.
     *
     * @throws InterruptedException when the thread is interrupted while polling.
     */
    public void retrieve() throws InterruptedException {
        // A bounded poll (rather than take()) lets the run() loop re-check
        // the 'complete' flag periodically instead of blocking forever.
        final String url = myUrlRetrievalQueue.poll(100, TimeUnit.MILLISECONDS);
        if (url != null && !url.isEmpty()) {
            try {
                retrieveWebPage(url);
            } catch (final MalformedURLException e) {
                System.out.println("Malformed URL entered....");
                e.printStackTrace();
            }
        }
    }

    /**
     * Retrieves the web page for the given URL and places its contents on
     * the parser queue. Called by retrieve().
     *
     * @param a_Url the URL whose page is retrieved.
     * @throws MalformedURLException if a_Url is not a syntactically valid URL.
     */
    private void retrieveWebPage(final String a_Url) throws MalformedURLException {
        final URL theUrl = new URL(a_Url);

        // Mark the URL visited up front; Set.add() returns false for a
        // duplicate, so a URL that was queued twice is fetched only once.
        // (Previously the set was updated only after retrieval, allowing
        // duplicate fetches of a URL enqueued multiple times.)
        if (!myUrlList.add(theUrl.toString())) {
            return;
        }

        // try-with-resources closes the reader (and underlying stream) on
        // every path. UTF-8 is assumed; ideally the charset from the
        // Content-Type header would be honored — TODO confirm against the
        // pages being crawled.
        try (BufferedReader br = new BufferedReader(
                new InputStreamReader(theUrl.openStream(), StandardCharsets.UTF_8))) {
            final StringBuilder content = new StringBuilder();
            String line;
            while ((line = br.readLine()) != null) {
                // Re-append the line break readLine() strips, so tokens
                // split across lines are not fused together.
                content.append(line).append('\n');
            }
            final PageContent pageContent =
                    new PageContent(theUrl.toString(), content.toString());
            System.out.println("****Adding page content to queue...");
            System.out.println(pageContent);
            myPagesParserQueue.add(pageContent);
        } catch (final IOException ioe) {
            // A page that fails to download is skipped; the crawl continues.
            ioe.printStackTrace();
        }
    }

    /**
     * Checks whether the given string is a well-formed HTTP URL that accepts
     * a connection. Note: this performs a network round-trip on every call.
     *
     * @param theUrl the candidate URL.
     * @return true if a connection to theUrl could be opened.
     */
    private boolean isValidURL(final String theUrl) {
        HttpURLConnection conn = null;
        try {
            final URL url = new URL(theUrl);
            conn = (HttpURLConnection) url.openConnection();
            conn.connect();
            return true;
        } catch (final IOException | ClassCastException e) {
            // MalformedURLException is an IOException; non-HTTP schemes
            // surface as a ClassCastException on the cast above.
            return false;
        } finally {
            if (conn != null) {
                // Release the connection; the original implementation leaked it.
                conn.disconnect();
            }
        }
    }

    /**
     * Offers a URL for retrieval. Called by the PageParser as it discovers
     * links. The URL is dropped when the page limit has been reached, when
     * it has already been retrieved, or when it is not valid.
     *
     * @param a_Url the URL to enqueue for retrieval.
     */
    public void addURL(final String a_Url) {
        if (myUrlList.size() >= myMaxPages) {
            System.out.println("*** Not adding any more pages to retriever queue.");
            return;
        }
        if (!myUrlList.contains(a_Url) && isValidURL(a_Url)) {
            myUrlRetrievalQueue.add(a_Url);
        } else {
            System.err.println("*** The given URL is either not valid or already processed.: " + a_Url);
        }
    }

    /**
     * Returns the queue of retrieved web pages. The PageParser calls this to
     * access the pages stored by this retriever.
     *
     * @return the BlockingQueue of retrieved page contents.
     */
    public BlockingQueue<PageContent> getWebPagesQueueToParse() {
        return myPagesParserQueue;
    }

}