

package homework9package;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.ObjectInput;
import java.io.ObjectInputStream;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.HashMap;


/**
 * urlreader class
 * 
 * this class provides functions that help with reading the html from
 * a webpage
 * 
 * @author      Elizabeth Mahon and Orion Taylor (added comments)
 * @course Software Engineering
 * @date 3-10-13
 */

public class urlreader
{
	//local_URL represents the URL that this urlreader object actually loaded
	private String local_URL;
	
	//URL_list is a list of the URL's found on the webpage at local_URL
	private ArrayList<URL> URL_list;
	
	//page_content is all the non-HTML text on the webpage
	private String page_content;
	
	//word_freq maps each (lower-cased) word in page_content to its frequency
	private HashMap<String,Integer> word_freq;
	
	/** explicit no-arg constructor for urlreader
	 * 
	 * creates an empty urlreader; every field stays null. The only real
	 * use for such an instance is as a receiver for deserialize().
	 */
	public urlreader(){
		//nothing to initialize here
	}
	
	/** explicit set-the-fields constructor for urlreader
	 * 
	 * builds an instance by setting each field directly; intended for
	 * deserialization, which reconstructs an object from its stored fields.
	 * 
	 * @param lURL value stored in local_URL
	 * @param lURL_list value stored in URL_list
	 * @param lpage_content value stored in page_content
	 * @param lword_freq value stored in word_freq
	 */
	public urlreader(String lURL,ArrayList<URL>  lURL_list, 
			String lpage_content,HashMap<String,Integer> lword_freq){
		this.local_URL=lURL;
		this.URL_list=lURL_list;
		this.page_content=lpage_content;
		this.word_freq=lword_freq;
	}
	
	/** explicit constructor for urlreader
	 * 
	 * downloads the webpage at the given url, then derives every field:
	 * URL_list gets the links found in the page, page_content gets the
	 * page text with HTML markup stripped, and word_freq gets a word
	 * histogram of that text.
	 * 
	 * If the download fails, readURL returns null and the derived fields
	 * degrade gracefully (empty list / empty string / empty map).
	 * 
	 * @param url a String representing the url of the webpage
	 */
	public urlreader(String url){
		
		//content holds all the raw HTML of the webpage (null on failure)
		String content=this.readURL(url);
		
		//remember which URL this object represents
		this.local_URL=url;
		
		//links found in the webpage
		this.URL_list=this.extractURL(url, content);
		
		//non-HTML text found on the webpage
		this.page_content=this.removeHTML(content);
		
		//map of words in page_content to their frequencies
		this.word_freq=this.createHistogram(this.page_content);
	}
	
	/** explicit constructor for urlreader
	 * 
	 * identical to the String constructor, but accepts a java.net.URL;
	 * the URL is converted to its String form and processed the same way.
	 * 
	 * @param url a URL representing the url of the webpage
	 */
	public urlreader(URL url){
		
		//content holds all the raw HTML of the webpage (null on failure)
		String content=this.readURL(url.toString());
		
		//remember which URL this object represents
		this.local_URL=url.toString();
		
		//links found in the webpage
		this.URL_list=this.extractURL(url.toString(), content);
		
		//non-HTML text found on the webpage
		this.page_content=this.removeHTML(content);
		
		//map of words in page_content to their frequencies
		this.word_freq=this.createHistogram(this.page_content);
	}
	
	/** getlocal_URL()
	 * 
	 * getter for the local_URL field.
	 * 
	 * @return the URL string this reader loaded (null for an empty reader)
	 */
	public String getlocal_URL(){
		return this.local_URL;
	}
	
	/** getURL_list()
	 * 
	 * getter for the URL_list field.
	 * 
	 * @return the list of URLs found on the page
	 */
	public ArrayList<URL> getURL_list(){
		return this.URL_list;
	}
	
	/** getword_freq()
	 * 
	 * getter for the word_freq field.
	 * 
	 * @return the word-to-frequency histogram of the page text
	 */
	public HashMap<String,Integer> getword_freq(){
		return this.word_freq;
	}
	
	/** getpage_content()
	 * 
	 * getter for the page_content field.
	 * 
	 * @return the page text with HTML markup removed
	 */
	public String getpage_content(){
		return this.page_content;
	}
	
	/** function readURL
	 * 
	 * downloads the HTML content at the given url and returns it as one
	 * String. Lines are re-joined with '\n' because readLine() strips the
	 * terminator — without it, the last word of one line would fuse with
	 * the first word of the next, corrupting the word histogram.
	 * 
	 * NOTE(review): the stream is decoded with the platform default
	 * charset (no charset passed to InputStreamReader), matching the
	 * original behavior; the page's declared encoding is not consulted.
	 * 
	 * @param url a String representing the url of the webpage
	 * @return the HTML content of the webpage, or null if the download
	 * failed for any reason (malformed URL or I/O error)
	 */
	private String readURL(String url)
	{
		//MalformedURLException and IOException are both possible here
		try {
			URL place = new URL(url);
			
			//try-with-resources guarantees the connection stream is
			//closed even when an exception is thrown mid-read
			try (BufferedReader data = new BufferedReader(
					new InputStreamReader(place.openStream()))) {
				
				//accumulate the page in a StringBuilder instead of
				//repeated String concatenation (avoids O(n^2) copying)
				StringBuilder html_data = new StringBuilder();
				
				//line holds one line of html from the page
				String line;
				
				//while there is still stuff left to read...
				while((line = data.readLine()) != null)
				{
					//append the line plus the terminator readLine() removed
					html_data.append(line).append('\n');
				}
				
				//return the content of the webpage
				return html_data.toString();
			}
			
		} catch (MalformedURLException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
		
		//null lets the caller know the download went wrong
		return null;
	}
	
	/** function removeHTML
	 * 
	 * strips every expression contained inside angle brackets (tracking
	 * nesting depth, so "<a <b> c>" is removed entirely) and returns the
	 * remaining text.
	 * 
	 * @param content a String of website html; null is treated as ""
	 * @return the text of content outside all angle brackets
	 */
	private String removeHTML(String content){
		
		//a failed download yields null content; treat it as an empty page
		//instead of throwing a NullPointerException
		if(content==null){
			return "";
		}
		
		//bracket_count represents how far into nested brackets we are
		int bracket_count=0;
		
		//return_string accumulates the content outside of brackets
		StringBuilder return_string=new StringBuilder();
		
		//iterate through the characters of our input string
		for(char c:content.toCharArray()){
			
			//a left bracket increases our nesting depth
			if(c=='<'){
				bracket_count++;
			}
			
			//characters outside all brackets are kept
			if(bracket_count==0){
				return_string.append(c);
			}
			
			//a right bracket (while nested) decreases our nesting depth;
			//the '>' itself is nested, so it is never appended above
			if(c=='>' && bracket_count>0){
				bracket_count--;
			}
			
		}
		
		//return_string is now content minus the stuff in angle brackets
		return return_string.toString();
	}
	
	
	/** function extractURL
	 * 
	 * scans the html for href attributes and collects the linked URLs.
	 * mailto links are skipped; links that do not start with "http" are
	 * resolved as relative links against my_URL via URL(URL, String).
	 * 
	 * @param my_URL a String with the URL of the website itself
	 * (needed to resolve relative links)
	 * 
	 * @param content a String of website html; null is treated as empty
	 * 
	 * @return ArrayList of the URLs found in the webpage
	 */
	private ArrayList <URL> extractURL(String my_URL, String content){
		
		//return_list collects the URLs found in the webpage
		ArrayList <URL> return_list=new ArrayList<URL> ();
		
		//a failed download yields null content; report no links
		if(content==null){
			return return_list;
		}
		
		//url_list holds the raw link strings found in the html
		ArrayList<String> url_list = new ArrayList<String>();
		
		//first, extract every expression inside angle brackets
		ArrayList <String> bracket_list=this.extractHTML(content);
		
		//examine each tag for an href attribute
		for(String bracket_string:bracket_list){
			//upper-case copy so the "HREF" search is case-insensitive
			String bracket_string_prime=bracket_string.toUpperCase();
			
			//j is the location of "HREF" in the tag, or -1 if absent
			int j=bracket_string_prime.indexOf("HREF");
			
			//if there actually is a link in this tag...
			if(j!=-1){
				
				//url_string accumulates the link text
				String url_string="";
				
				//in_quotes tracks whether we are inside the "" value
				boolean in_quotes=false;
				
				//walk the original (not upper-cased) tag from "HREF" on
				while(j<bracket_string.length()){
					
					//a quotation mark toggles the in_quotes state
					if(bracket_string.charAt(j)=='\"'){
						in_quotes=!in_quotes;
						
						//leaving the quotes means the URL is complete
						if(!in_quotes){
							break;
						}  
					}
					
					//characters inside the quotes are part of the URL
					if(in_quotes){
						url_string=url_string+bracket_string.charAt(j);
					}
					
					j++;
				}
				
				//a closed, non-empty quotation yields a link; drop the
				//leading quotation mark that was captured with it
				if(!in_quotes&&url_string.length()>0){
					url_list.add(url_string.substring(1));
				}
			}
		}
		
		//classify every raw link: email, relative, or absolute
		for(String url:url_list)
		{
			if(url.contains("mailto"))
			{
				//email link: skip it, we don't want it
			}
			else if(!url.startsWith("http"))
			{
				//a relative link (startsWith also safely handles links
				//shorter than 4 chars such as "#" or "/", which the old
				//substring(0,4) check crashed on); resolve it against
				//the page's own URL
		        try{
		            URL main = new URL(my_URL);
		            URL relative = new URL(main, url);
		            return_list.add(relative);
		            }
		            catch(MalformedURLException e)
		            {
		                System.out.println("bad url: "+url);
		            }
			}
			else
			{
				//an absolute link; if it is malformed that is the
				//website's fault, so just report and skip it
				try {
					URL found = new URL(url);
					return_list.add(found);
				} catch (MalformedURLException e) {
					System.out.println("Bad url: "+url);
				}
			}
		}
		
		
		//at this point, we can just return the return_list
		return return_list;
	}
	
	/** function extractHTML
	 * 
	 * extracts every expression contained inside angle brackets (with
	 * nesting tracked, so "<a <b> c>" comes back as one expression) and
	 * returns them as a list of strings, brackets included.
	 * 
	 * @param content a String representing html of a website
	 * @return ArrayList of the expressions found inside angle brackets
	 */
	private ArrayList<String> extractHTML(String content){
		
		//return_list collects the bracketed expressions
		ArrayList <String> return_list=new ArrayList<String> ();
		
		
		//bracket_count represents how far into nested brackets we are
		int bracket_count=0;
		
		//return_string accumulates the current bracketed expression
		StringBuilder return_string=new StringBuilder();
		
		//iterate through the characters of our input string
		for(char c:content.toCharArray()){
			
			//a left bracket increases our nesting depth
			if(c=='<'){
				bracket_count++;
			}
			
			//characters inside brackets belong to the current expression
			if(bracket_count>0){
				return_string.append(c);
			}
			
			//a right bracket (while nested) decreases our nesting depth
			if(c=='>' && bracket_count>0){
				
				bracket_count--;
				
				//depth zero means the outermost bracket just closed:
				//the expression is complete
				if(bracket_count==0){
					return_list.add(return_string.toString());
					return_string.setLength(0);
				}
			}
			
		}
		
		//at this point, we can just return the return_list
		return return_list;
	}
	
	/** function createHistogram
	 * 
	 * splits the text into words (lower-cased) and returns a HashMap
	 * from each word to the number of times it appears.
	 * 
	 * splitting is on runs of whitespace ("\\s+") rather than a single
	 * space, so repeated or leading whitespace no longer produces bogus
	 * empty-string entries in the histogram.
	 * 
	 * @param text the text you want to split up; null is treated as ""
	 * @return HashMap(String,Integer) mapping each word in the text
	 * to its corresponding frequency
	 */
	private HashMap <String,Integer> createHistogram(String text){
		
		//freqMap maps each word to its frequency in text
	    HashMap<String,Integer> freqMap=new HashMap<String,Integer> ();
	    
	    //no text means an empty histogram
	    if(text==null){
	    	return freqMap;
	    }
	    
	    //lower-case so counting is case-insensitive, and split on any
	    //run of whitespace (spaces, tabs, newlines)
	    String [] text_list=text.toLowerCase().split("\\s+");
	    
	    //iterate through the word list
	    for(String term:text_list){
	    	
	    	//split can still emit one leading "" token; skip it
	    	if(term.isEmpty()){
	    		continue;
	    	}
	    	
	    	//first sighting counts 1; later sightings increment
	    	//(autoboxing replaces the deprecated new Integer(1))
	    	Integer count=freqMap.get(term);
	    	freqMap.put(term, count==null ? 1 : count+1);
	    }
	    
	    return freqMap;
		
	}
	
	/** function serialize
	 * 
	 * serializes this urlreader by packing its four fields into a
	 * HashMap keyed by field name and serializing that map with
	 * serial_class.
	 * 
	 * @return byte[] the serialization of this urlreader object
	 * @throws IOException if serial_class fails to write the map
	 * @throws ClassNotFoundException propagated from serial_class
	 */
	public byte[] serialize() throws IOException,
	ClassNotFoundException{
		
		
		//serial_class instance used to (de)serialize objects
		serial_class my_serial=new serial_class();		 
		
		//fieldMap maps each field name to the field's current value
		HashMap <String,Object> fieldMap=new HashMap<String,Object>();
		
		//store each field value under its field name
		fieldMap.put("local_URL",(Object) this.local_URL);
		fieldMap.put("page_content",(Object) this.page_content);
		fieldMap.put("URL_list", (Object) this.URL_list);
		fieldMap.put("word_freq",(Object) this.word_freq);
		
		//return the serialization of the map
		return my_serial.serialize(fieldMap);
	}
	
	/** function deserialize
	 * 
	 * deserializes bytes produced by serialize() back into a urlreader.
	 * the bytes decode to a HashMap of field name to field value; each
	 * value is cast back to its declared type and handed to the
	 * field-setting constructor.
	 * 
	 * @param yourBytes the serialization of the urlreader we care about
	 * @return the reconstructed urlreader
	 * @throws IOException if serial_class fails to read the bytes
	 * @throws ClassNotFoundException if a serialized class is missing
	 */
	@SuppressWarnings("unchecked")
	public  urlreader deserialize(byte[] yourBytes) throws IOException,
	ClassNotFoundException{
		
		//serial_class instance used to (de)serialize objects
		serial_class my_serial=new serial_class();		 
		
		//decode the bytes back into the field-name-to-value map
		HashMap <String,Object> fieldMap=
				(HashMap<String,Object>) my_serial.deserialize(yourBytes);
		
		//cast all the fields to their correct types (unchecked, hence
		//the method-level @SuppressWarnings), then build the urlreader
		 return new urlreader( (String)fieldMap.get("local_URL"),
						(ArrayList<URL>) fieldMap.get("URL_list"),
						(String) fieldMap.get("page_content"),
						(HashMap<String,Integer>) fieldMap.get("word_freq"));
	
		
		
	}
	
}
