package com4280;

import java.util.*;
import java.net.*;
import java.io.*;
import javax.swing.text.*;
import javax.swing.text.html.*;


/**
 * Adapted from Jeff Heaton's Spider.
 * This no longer implements isSpiderReportable.
 * @author Andrew Evans 
 * @author Duncan Grant
 * @author Mitchell Whitehouse 
 * @version 1.0
 */
public class Spider implements Runnable 
{
	/** URLs that could not be fetched or parsed. */
	protected ArrayList<URL> workloadError = new ArrayList<URL>(3);
	/** URLs discovered but not yet processed. */
	protected ArrayList<URL> workloadWaiting = new ArrayList<URL>(3);
	/** URLs that have been fetched (or deliberately skipped). */
	protected ArrayList<URL> workloadProcessed = new ArrayList<URL>(3);

	/** The bot this spider reports discovered URLs and output to. */
	protected volatile MADBot bot;

	/** Set via cancel() to make run() return before the workload is empty. */
	protected volatile boolean cancel = false;  
	/** Monitor the worker thread parks on while paused. */
	public final Object eventObject = new Object();
	/** When true, run() blocks on eventObject until notified. */
	public volatile boolean paused;
	/** Count of pages successfully fetched and parsed. */
	private int processedPages = 0;

	/**
	  * Constructs a new Spider.
	  * @param bot the MADBot object to which this spider reports.
	  * @param seed the starting URL from which this spider should begin.
	  */
	public Spider(MADBot bot, String seed)
	{
		try 
		{
			URL seedURL = new URL(seed);
			addURL(seedURL);
		} 
		catch (MalformedURLException e) 
		{
			// a bad seed leaves the waiting workload empty; run() will exit immediately
			log("Malformed seed URL: " + seed + " (" + e + ")");
		}
		this.bot = bot;
	}

	/**
	* Get the URLs that resulted in an error.
	* @return A collection of URLs.
	*/
	public ArrayList<URL> getWorkloadError()
	{
		return workloadError;
	}

	/**
	* Get the URLs that were waiting to be processed. 
	* You should add one URL to this collection to 
	* begin the spider.
	* 
	* @return A collection of URLs.
	*/
	public ArrayList<URL> getWorkloadWaiting()
	{
		return workloadWaiting;
	}

	/**
	* Get the URLs that were processed by this spider.
	* 
	* @return A collection of URLs.
	*/
	public ArrayList<URL> getWorkloadProcessed()
	{
		return workloadProcessed;
	}

	/**
	* Clear all of the workloads.
	*/
	public void clear()
	{
		getWorkloadError().clear();
		getWorkloadWaiting().clear();
		getWorkloadProcessed().clear();
	}

	/**
	* Set a flag that will cause the begin
	* method to return before it is done.
	*/
	public void cancel()
	{
		cancel = true;
	}

	/**
	* Add a URL for processing. Duplicates already present in any
	* workload (waiting, error or processed) are silently ignored.
	* @param url the URL to queue.
	*/
	public void addURL(URL url)
	{  
		if(!getWorkloadWaiting().contains(url) && !getWorkloadError().contains(url) && !getWorkloadProcessed().contains(url))
		{
			getWorkloadWaiting().add(url);
		}
	}

	/**
	* Called internally to process a URL. Sets up RequestProperties.
	* Accepts a link of text/* EXCLUDING text/css; any other content type
	* is marked processed without being fetched. Outputs successful URLs 
	* to the terminal window and to the GUI. On failure the URL is moved
	* to the error workload and the cause is logged.
	* Waits 0.2 seconds before returning to not overload the server.
	* 
	* @param url The URL to be processed.
	*/
	public void processURL(URL url)
	{
		try 
		{
			// get the URL's contents
			URLConnection connection = url.openConnection();
			connection.setRequestProperty("User-Agent","MADBOT - As part of a web-based university course: f.ciravegna@dcs.shef.ac.uk");
			connection.setRequestProperty("Content-Language","en");
			connection.setRequestProperty("Accept","text/html,text/xml,application/xhtml+xml,text/plain,application/xml");

			// Accept all text/* EXCLUDING text/css; anything else is skipped
			// (recorded as processed so it is never queued again).
			String contentType = connection.getContentType();
			if (contentType != null)
			{
				String type = contentType.toLowerCase();
				if (!type.startsWith("text/") || type.equals("text/css"))
				{
					getWorkloadWaiting().remove(url);        
					getWorkloadProcessed().add(url);    

					return;
				}
			}

			// print to terminal and to GUI
			System.out.println(url);
			bot.addOutput(url.toString());

			// read and parse the URL; the reader is always closed, even
			// when parsing throws (the original leaked one stream per page)
			// NOTE(review): uses the platform default charset; consider
			// honoring the response's declared charset.
			Reader r = new InputStreamReader(connection.getInputStream());
			try
			{
				HTMLEditorKit.Parser parse = new HTMLParse().getParser();
				parse.parse(r, new Parser(url), true);
			}
			finally
			{
				try
				{
					r.close();
				}
				catch (IOException ignored)
				{
					// the page has already been parsed; nothing useful to do
				}
			}

			// count only pages that were actually fetched and parsed,
			// so the final statistics exclude failures
			processedPages++;
		} 
		catch(Exception e) 
		{
			log("Error processing " + url + ": " + e);
			getWorkloadWaiting().remove(url);
			getWorkloadError().add(url);
			return;
		}
		
		// mark URL as complete
		getWorkloadWaiting().remove(url);        
		getWorkloadProcessed().add(url);
		
		sleep();
	}

	/**
	  * The core loop. While there is still workload waiting and the user has
	  * not opted to cancel, then continue processing URLs. Also implements
	  * pausing, whereby a user may opt for this thread to pause briefly before
	  * resuming.
	  * Finally, prints out statistics about the run.
	  */
	public void run() 
	{
		cancel = false;
		
		while (!getWorkloadWaiting().isEmpty() && !cancel)
		{	
			// snapshot the waiting list: processURL (via the parser
			// callback) may append to it while we iterate
			Object list[] = getWorkloadWaiting().toArray();
			for (int i = 0; i < list.length && !cancel; i++)
			{
				waitWhilePaused();
				processURL((URL)list[i]);
			}
		}
		
		System.out.println("Done. Processed " + processedPages + " pages.");
		
		bot.writeLocalFile();
		bot.writeExternalFile();
		System.out.println(bot.getLocalIWUrls().length + " internal links");
		System.out.println(bot.getExternalIWURLs().length + " external links");
	}

	/**
	  * Blocks the calling thread on eventObject for as long as the paused
	  * flag is set. The while loop guards against spurious wakeups (per the
	  * Object.wait contract). If interrupted, the interrupt status is
	  * restored and the wait is abandoned.
	  */
	private void waitWhilePaused()
	{
		synchronized(eventObject)
		{
			while (paused)
			{
				try
				{
					eventObject.wait();
				}
				catch(InterruptedException ie)
				{
					// preserve the interrupt for callers and stop waiting
					Thread.currentThread().interrupt();
					break;
				}
			}
		}
	}

	/**
	  * Waits for 0.2 seconds. Stops server overload.
	  * Restores the interrupt status if the sleep is interrupted.
	  */
	private void sleep()
	{
		try
		{
			Thread.sleep(200);
		}
		catch(InterruptedException ie)
		{
			// re-assert the interrupt so the owning thread can observe it
			Thread.currentThread().interrupt();
		}
	}

	/**
	 * A HTML parser callback used by this class to
	 * detect links.
	 * 
	 * @author Jeff Heaton
	 * @version 1.0
	 */
	protected class Parser
	extends HTMLEditorKit.ParserCallback {
		/** Base URL against which relative links are resolved. */
		protected URL base;

		public Parser(URL base)
		{
			this.base = base;
		}

		/**
		 * Extracts the HREF attribute (or SRC, for FRAME tags), strips any
		 * fragment, skips mailto: links and reports the rest.
		 */
		public void handleSimpleTag(HTML.Tag t,
		                            MutableAttributeSet a,int pos)
		{
			String href = (String)a.getAttribute(HTML.Attribute.HREF);

			if( (href==null) && (t==HTML.Tag.FRAME) )
				href = (String)a.getAttribute(HTML.Attribute.SRC);

			if ( href==null )
				return;

			// drop the fragment so page.html#a and page.html#b collapse
			// into a single URL
			int i = href.indexOf('#');
			if ( i!=-1 )
				href = href.substring(0,i);

			if ( href.toLowerCase().startsWith("mailto:") ) {
				return;
			}

			handleLink(base,href);
		}

		public void handleStartTag(HTML.Tag t, MutableAttributeSet a,int pos)
		{
			handleSimpleTag(t,a,pos);// handle the same way
		}

		/**
		 * Resolves a link against the base URL and queues it if the bot
		 * approves it. Malformed URLs are logged and skipped.
		 */
		protected void handleLink(URL base,String str)
		{
			try
			{
				URL url = new URL(base,str);            
				if (bot.spiderFoundURL(url))
					addURL(url);
			} catch ( MalformedURLException e ) {
				log("Found malformed URL: " + str );
			}
		}

	}   

	/**
	 * Called internally to log information.
	 * This basic method just writes the log
	 * out to the stdout.
	 * 
	 * @param entry The information to be written to the log.
	 */
	public void log(String entry)
	{
		System.out.println( (new Date()) + ":" + entry );
	}
}