import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Scanner;
import java.util.Set;
import java.util.Map.Entry;

import multiThread.Buffer;
import multiThread.ThreadedDataCollector;
import multiThread.ThreadedPageParser;
import multiThread.ThreadedPageRetriever;
import singlethread.DataCollector;
import singlethread.PageRetriever;
import singlethread.URLBucket;

/**
 * The public user interface to the WebSpider.
 * @author Darin
 * @author Kirk Leonard
 * @author John Patanian
 * @version Autumn 2009
 *
 */
public class WebSpiderUI 
{
  /**
   * Changes whether the program runs single or multi-threaded.
   */
  private static final boolean IS_THREADED = true;

  /**
  * The maximum number of pages to crawl (set from user input in getFirstURL).
  */
  private static int my_max_pages;

  /**
   * Reads keywords from standard input until "*" is entered and saves them,
   * one per line, to the file "userKeywords.txt".
   */
  public static void getKeywords()
  {
    // Deliberately not closed: closing a Scanner over System.in would close
    // System.in for the rest of the application.
    final Scanner inp = new Scanner(System.in);

    System.out.println("enter a keyword (or * to finish):");
    final StringBuilder keyWords = new StringBuilder();
    String a_keyword = inp.next();

    while (!"*".equals(a_keyword))
    {
      keyWords.append(a_keyword);
      // BUG FIX: the original appended the literal text "/n" instead of a
      // newline, so all keywords ran together on one line in the file.
      keyWords.append('\n');
      a_keyword = inp.next();
    }

    PrintStream out = null;
    try 
    {
      out = new PrintStream(new File("userKeywords.txt"));
      out.append(keyWords.toString());
    } 
    catch (final IOException e)
    {
      // Report the actual failure instead of a bare "oops".
      System.out.println("Could not write userKeywords.txt: " + e.getMessage());
    }
    finally
    {
      // Close on every path, not just on success, to avoid leaking the stream.
      if (out != null)
      {
        out.close();
      }
    }
  }

  /**
   * The maximum number of pages to crawl.
   * @return The maximum number of pages to crawl.
   */
  public int getMaxPages()
  {
    return my_max_pages;
  }

  /**
  * Prints the statistics for the crawl to standard output.
  * @param the_page_count The count of the pages crawled.
  * @param the_url_list The list of the pages crawled.
  * @param the_elapsed_time The elapsed time of the crawl, in seconds.
  */
  public void printStats(final int the_page_count, 
  final URLBucket the_url_list, 
  final long the_elapsed_time)
  {
    final DataCollector the_data_collector = new DataCollector(the_page_count);

    // Guard against division by zero when no pages were retrieved.
    final int divisor = Math.max(the_page_count, 1);

    System.out.println("Pages Retrieved: " + the_page_count);
    System.out.println("average Words Per Page: " + the_data_collector.getNumWords());
    System.out.println("Average URLs per page: " + 
      (the_url_list.getNumLinks() / divisor));
    System.out.println("Keyword" + "\t\tAvg. Hits per page" + "\tTotal Hits");

    final Set<Entry<String, Integer>> set = the_data_collector.getWordCounts();

    for (Entry<String, Integer> a_entry : set)
    {
      // getKey() is already a String and getValue() already an Integer;
      // no need to round-trip them through toString()/parseInt().
      String keyword = a_entry.getKey();
      final double value = a_entry.getValue().doubleValue();

      // Pad short keywords with an extra tab so the columns line up.
      if (keyword.length() < 8)
      {
        keyword = keyword + "\t";
      }

      System.out.println(keyword + "\t\t" + value / divisor + "\t\t" + value);
    }

    System.out.println("Total Running time: " + the_elapsed_time  + " seconds");
  }

  /**
  * Prompts for the maximum page count and the seed url.
  * Side effect: sets my_max_pages from user input.
  * @return The seed url, or null if the entered url was malformed.
  */
  public static URL getFirstURL()
  {
    // Not closed: closing this Scanner would close System.in.
    final Scanner inp = new Scanner(System.in);
    System.out.println("how many pages to parse?  ");

    my_max_pages = Integer.parseInt(inp.next());
    System.out.println("enter a url: ");
    URL a_url = null;
    try
    {
      a_url = new URL(inp.next());
    }
    catch (final MalformedURLException e)
    {
      System.out.println("Bad URL" + e);
    }

    return a_url;
  }

  /**
  * Runs the web crawler application.
  * @param the_args Command line arguments, ignored.
  */
  public static void main(final String [] the_args)
  {
    if (IS_THREADED)
    {
      crawlMultiThreaded();
    }
    else
    {
      crawlSingleThreaded();
    }
  }

  /**
   * Runs the single threaded application: retrieves pages one at a time
   * until there are no unvisited urls or the page limit is reached, then
   * prints statistics.
   */
  private static void crawlSingleThreaded()
  {
    // Begin timing the code for capturing statistics.
    final StopWatch code_timer = new StopWatch();

    // Used to produce filenames and for stats.
    int page_count = 0;

    // A place to store urls; seeded below with the user's first url.
    final URLBucket urlList = new URLBucket();

    final WebSpiderUI ui = new WebSpiderUI();

    code_timer.start();

    final URL first_url = WebSpiderUI.getFirstURL();
    final int max_pages = ui.getMaxPages();
    urlList.addURL(first_url);

    // Crawl as long as there are unvisited urls and we are under the limit.
    while (urlList.getUnVisitedSize() > 0 && page_count < max_pages)
    {
      // The PageRetriever constructor performs the retrieval as a side
      // effect; the instance itself is not needed afterwards.
      new PageRetriever(urlList, urlList.getURL(), page_count);
      page_count++;
    }

    code_timer.stop();

    // Now that all pages have been acquired, collect the stats.
    ui.printStats(page_count, urlList, code_timer.getElapsedTimeSecs());
  }

  /**
   * Crawls in multithreaded mode.
   * PageRetriever consumes from the URLBucket and produces raw pages;
   * PageParser consumes raw pages and produces urls and parsed pages;
   * DataCollector consumes parsed pages.
   */
  private static void crawlMultiThreaded()
  {
    final multiThread.URLBucket url_buffer = new multiThread.URLBucket();
    final Buffer<String> page_buffer = new Buffer<String>();
    final Buffer<String> parsed_page_buffer = new Buffer<String>();

    // The worker threads.
    final ThreadedPageRetriever page_retriever = 
      new ThreadedPageRetriever(url_buffer, page_buffer);

    final ThreadedPageParser parser = 
      new ThreadedPageParser(url_buffer, page_buffer, parsed_page_buffer);

    // Start with a seed URL.
    final URL first_url = WebSpiderUI.getFirstURL();

    try
    {
      url_buffer.add(first_url);
    }
    catch (final InterruptedException e)
    {
      // Restore the interrupt flag so callers can observe it; the initial
      // entry is added before any worker thread is running, so there is
      // nothing else to clean up.
      Thread.currentThread().interrupt();
      return;
    }

    // Start the threads running.
    page_retriever.start();

    final Thread parser_thread = new Thread(parser);
    parser_thread.start();

    final ThreadedDataCollector data = new ThreadedDataCollector(parsed_page_buffer);
    final Thread data_thread = new Thread(data);

    data_thread.start();

    // Wait for the collector to reach the page limit. Sleep between polls
    // instead of busy-waiting, which would peg a CPU core.
    while (data.numPagesCollected() < WebSpiderUI.my_max_pages)
    {
      try
      {
        Thread.sleep(100);
      }
      catch (final InterruptedException e)
      {
        Thread.currentThread().interrupt();
        break;
      }
    }

    System.out.println("Now it's time to stop the threads");
  }

}
