/* Team 5
 * James McQueen
 * Corwyn Simpson
 * May 1 ish, 2012
 */
package spider.controllers;

import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import spider.models.Page;
import spider.stores.LinkBuffer;
import spider.stores.PageBuffer;

/**
 * Manages the creation of multiple Retrievers.
 *
 * @author James McQueen
 * @author Corwyn Simpson
 */
/**
 * Manages the creation of multiple Retrievers.
 *
 * Thread-safety: run() is intended to execute on its own thread; isDone(),
 * isRunning(), canGetMore() (indirectly) and getBadLinks() may be called from
 * other threads. All access to my_future_list is guarded by synchronizing on
 * the list itself, my_bad_links is a synchronized list, and my_running_flag is
 * volatile so its value is visible across threads.
 *
 * @author James McQueen
 * @author Corwyn Simpson
 */
public class RetrieverGroup implements Runnable
{
    /**
     * This is multiplied by an instances my_max_threads to determine how many
     * unfinished retrievers to have running when making new retrievers. This is
     * so that when a retriever finishes while this thread is sleeping, there
     * are still new Retrievers that can be run by the thread pool.
     */
    private static final double EXTRA_RETRIEVERS_MULT = 1.2;
    /**
     * How long run() sleeps between polling passes, in milliseconds.
     */
    private static final long POLL_INTERVAL_MS = 5;
    /**
     * The LinkBuffer containing the links produced by the Parser. The links are
     * used to create pages.
     */
    private final LinkBuffer my_links;
    /**
     * The PageBuffer containing the pages produced by the Retriever. The pages
     * are used by the parser to identify keywords and create new links.
     */
    private final PageBuffer my_pages;
    /**
     * The number of pages to retrieve before the RetrieverGroup should stop
     * running.
     */
    private final int my_retrieved_pages_limit;
    /**
     * The thread pool that executes all the individual Retrievers when running
     * this class with run(). Shut down when run() finishes so its worker
     * threads do not keep the JVM alive.
     */
    private final ExecutorService my_thread_pool;
    /**
     * The maximum number of threads this RetrieverGroup should run at once.
     */
    private final int my_max_threads;
    /**
     * The list of Futures used to track the progress of Retriever threads that
     * have been created and added to the thread pool. All reads and writes
     * must synchronize on this list, since isDone()/canGetMore() may be
     * called from other threads while run() mutates it.
     */
    private final List<Future<Retriever>> my_future_list;
    /**
     * The ParserGroup to conspire with to make sure that Retrieving does not
     * stop prematurely. May be null if watchParserGroup() was never called;
     * in that case the partner is treated as already done.
     */
    private ParserGroup my_partner;
    /**
     * Boolean flag so that other threads can check if this thread is running.
     * Volatile so writes in run() are visible to threads calling isRunning().
     */
    private volatile boolean my_running_flag;
    /**
     * The list of bad links that were found in storePage(Retriever). Wrapped
     * in a synchronized list because canGetMore() may read its size from
     * another thread while run() is adding to it.
     */
    private final List<URL> my_bad_links;

    /**
     * Creates a new RetrieverGroup with the given things. Only the first two
     * parameters are necessary if this is not going to be run.
     * @param the_links The LinkBuffer to pull links from for retrieving.
     * @param the_pages The PageBuffer to push successfully retrieved pages to.
     * @param the_work_limit The number of web pages to retrieve before exiting.
     * @param some_max_threads The number of concurrent threads to use for 
     * retrieving pages.
     */
    public RetrieverGroup(LinkBuffer the_links, PageBuffer the_pages,
            int the_work_limit, int some_max_threads) {
        my_links = the_links;
        my_pages = the_pages;
        my_retrieved_pages_limit = the_work_limit;
        my_thread_pool = Executors.newFixedThreadPool(some_max_threads);
        my_max_threads = some_max_threads;
        my_future_list = new LinkedList<Future<Retriever>>();
        my_bad_links = Collections.synchronizedList(new LinkedList<URL>());
    }

    /**
     * Returns the PageBuffer used by this RetrieverGroup to set new page in.
     */
    public PageBuffer getPageBuffer() 
    {
        return my_pages;
    }

    /**
     * Returns the LinkBuffer used by this RetrieverGroup to get new links from.
     */
    public LinkBuffer getLinkBuffer() 
    {
        return my_links;
    }

    /**
     * Returns a snapshot of the links pulled from the LinkBuffer that were
     * invalid links. A copy is returned so callers can iterate safely while
     * run() is still adding bad links.
     */
    public List<URL> getBadLinks()
    {
        synchronized (my_bad_links)
        {
            return Collections.unmodifiableList(new ArrayList<URL>(my_bad_links));
        }
    }

    /**
     * Stores the page that the_retriever retrieved to the PageBuffer. If the
     * retriever produced no page, the URL is recorded as a bad link instead.
     * @param the_retriever The retriever to get the fetched Page from.
     * @throws InterruptedException if interrupted while adding to the buffer.
     */
    public void storePage(final Retriever the_retriever) throws InterruptedException 
    {
        final Page page = the_retriever.getPage();
        if (page != null)
        {
            my_pages.addPage(page);
        }
        else
        {
            my_bad_links.add(the_retriever.getURL());
        }
    }
    
     /**
     * Returns true if there are no more links in the LinkBuffer and the last
     * Retriever has finished running. Only useful when this is run as a thread.
     */
    public boolean isDone() 
    {
        synchronized (my_future_list)
        {
            //I have no more sub-threads and no more links to fetch, OR
            return (my_future_list.isEmpty() && my_links.isEmpty()) || 
            //I am not supposed to get anything else
            !canGetMore();
        }
    }
    
    /**
     * Sets the ParserGroup to poll so that only when both this thread and 
     * the other one are out of work to do will this thread stop running
     */
    public void watchParserGroup(ParserGroup the_parser_group)
    {
        my_partner = the_parser_group;
    }    
    
    /**
     * Returns true if this is RetrieverGroup is currently running.
     */
    public boolean isRunning()
    {
        return my_running_flag;
    }
    
    /**
     * Does everything retriever-related in a highly multi-threaded way.
     * Polls the Futures of submitted Retrievers, stores finished pages,
     * tops the pool back up with new Retrievers, and repeats until both this
     * group and its partner (if any) are out of work or the page limit is hit.
     */
    public void run()
    {
        my_running_flag = true;
        try
        {
            while ((!this.isDone() || !isPartnerDone()) && canGetMore())
            {
                //Snapshot the finished Retrievers under the lock, then
                //process them outside it so storePage() never blocks while
                //holding the list lock.
                final List<Future<Retriever>> done_futures = collectDoneFutures();
                for (Future<Retriever> future : done_futures)
                {
                    try
                    {
                        //get() cannot block here: the future is already done.
                        storePage(future.get());
                    }
                    catch (ExecutionException e)
                    {
                        System.err.println("Retriever exception:\n" + e.getCause());
                    }
                }
                synchronized (my_future_list)
                {
                    //Actually remove the done Futures now that we aren't iterating
                    my_future_list.removeAll(done_futures);
                    //Add new retrievers until we have a few extra beyond my_max_threads
                    while (my_future_list.size() < (my_max_threads * EXTRA_RETRIEVERS_MULT)
                            && !my_links.isEmpty() && canGetMore())
                    {
                        Retriever new_retriever = new Retriever(this);
                        my_future_list.add(my_thread_pool.submit(new_retriever, new_retriever));
                    }
                }
                Thread.sleep(POLL_INTERVAL_MS);
            }
        }
        catch (InterruptedException e)
        {
            //Restore the interrupt status so callers up-stack can observe it.
            Thread.currentThread().interrupt();
            System.err.println("RetrieverGroup interrupted\n");
        }
        finally
        {
            //Release the pool's worker threads; without this they would keep
            //the JVM alive after run() returns.
            my_thread_pool.shutdown();
            System.out.println("RetrieverGroup stopped running\n");
            my_running_flag = false;
        }
    }

    /**
     * Returns true if the partner ParserGroup is done, or if no partner was
     * ever set via watchParserGroup() (avoids the NPE the old code threw when
     * run() was used without a partner).
     */
    private boolean isPartnerDone()
    {
        return my_partner == null || my_partner.isDone();
    }

    /**
     * Returns the subset of my_future_list whose Retrievers have finished,
     * collected while holding the list lock so run() never iterates the list
     * concurrently with isDone()/canGetMore().
     */
    private List<Future<Retriever>> collectDoneFutures()
    {
        final List<Future<Retriever>> done = new ArrayList<Future<Retriever>>();
        synchronized (my_future_list)
        {
            for (Future<Retriever> future : my_future_list)
            {
                if (future.isDone())
                {
                    done.add(future);
                }
            }
        }
        return done;
    }

    /**
     * Returns true if creating new jobs will not put us past the threshhold of
     * pages to process.
     */
    private boolean canGetMore()
    {
        final int in_flight;
        //size() on a plain LinkedList is not safe to read unsynchronized;
        //the lock is reentrant, so calls from isDone()/run() are fine.
        synchronized (my_future_list)
        {
            in_flight = my_future_list.size();
        }
        int retrieved_pages = my_links.visitedCount() - my_bad_links.size() - in_flight;
        return retrieved_pages < my_retrieved_pages_limit;
    }
}
