package singleThreaded;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.concurrent.BlockingQueue;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import MultiThreaded.PageParser;

/**
 * Parses the content of a given URL.  Keeps a set of words parsed (for the analyzer),
 * and a set of urls parsed (for the page retriever.)
 *
 * <p>Not thread-safe: this is the single-threaded variant, intended to be driven
 * from one thread only.
 *
 * @author Aaron Kaufman
 */
public class PageParserSingle {

    /**
     * Matches {@code href} attributes inside anchor tags; capture group 1 is the
     * raw link target.  Compiled once instead of on every {@code parse()} call.
     */
    private static final Pattern HREF_PATTERN =
            Pattern.compile("<a[^>]+href\\s*=\\s*[\"']([^'\"> ]*)\\s*['\"]");

    private final List<String> my_link_queue;       // links for the retriever to grab
    private final List<List<String>> my_word_queue; // one word list per parsed document, for the analyzer
    private final Set<String> my_visited_urls;      // pages already parsed successfully; never re-parsed
    private int my_pages_parsed = 0;                // pages for which a parse was started

    /**
     * The total number of links found across all parsed pages.
     */
    private int my_total;

    /**
     * The total parse time for all pages, in nanoseconds.
     */
    private double my_total_time;

    /**
     * Constructs a parser with empty link, word, and visited-URL collections.
     */
    public PageParserSingle() {
        my_link_queue = new ArrayList<String>();
        my_word_queue = new ArrayList<List<String>>();
        my_total = 0;
        my_visited_urls = new HashSet<String>();
    }

    /**
     * Parses one page.
     * It reads the page body, collects all anchor links (converted to absolute
     * URLs) into the link queue for the PageRetriever, and collects the page's
     * words into the word queue for the PageAnalyzer.  Pages that were already
     * parsed, or that cannot be read, are skipped silently.
     *
     * @param source the URL of the page to parse.
     */
    public void parse(URL source) {
        long start = System.nanoTime();
        String pageUrl = source.toString();
        if (my_visited_urls.contains(pageUrl)) {
            return; // Don't want to parse previously-parsed pages.
        }
        my_pages_parsed++;

        StringBuilder buffer = new StringBuilder();
        // try-with-resources guarantees the reader is closed; the original code
        // leaked it on every call, including the IOException early-return path.
        try (BufferedReader br = new BufferedReader(
                new InputStreamReader(source.openStream(), StandardCharsets.UTF_8))) {
            String line;
            // NOTE(review): lines are concatenated without a separator, so a word
            // split across a line break is fused; preserved from the original.
            while ((line = br.readLine()) != null) {
                buffer.append(line);
            }
        } catch (IOException e) {
            // Unreachable/unreadable page: skip it.  It is deliberately not added
            // to my_visited_urls, so a later attempt may retry it.
            return;
        }
        String html = buffer.toString();

        List<String> links = new ArrayList<String>();
        Matcher m = HREF_PATTERN.matcher(html);
        while (m.find()) {
            links.add(m.group(1));
        }

        my_word_queue.add(PageParser.getWordList(html));
        my_visited_urls.add(pageUrl);

        String[] url_subdomains = pageUrl.split("/");
        List<String> modified_list = new ArrayList<String>();
        for (String s : links) { // Now, modify the relative urls to be absolute urls.
            my_total++;
            if (s.startsWith("..")) { // We need to step backwards before going further.
                modified_list.add(resolveParentRelative(s, url_subdomains));
            } else if (s.startsWith("http:")) { // This is an absolute URL, good as-is.
                // NOTE(review): "https:" links are excluded here, as in the original
                // code — confirm whether they should be accepted too.
                modified_list.add(s);
            } else { // Otherwise, we're facing a relative url off our current branch.
                String absolute = resolveChildRelative(s, url_subdomains);
                // Do not add to list if the retrieved page is just a picture.
                if (!endsWith(absolute, ".png")
                        && !endsWith(absolute, ".jpg")
                        && !endsWith(absolute, ".gif")) {
                    modified_list.add(absolute);
                }
            }
        }

        my_link_queue.addAll(modified_list);
        my_total_time += System.nanoTime() - start;
    }

    /**
     * Resolves a link that begins with ".." against the current page's URL,
     * stepping back one path segment per leading "..".
     *
     * @param link the relative link, starting with "..".
     * @param url_subdomains the current page URL split on "/".
     * @return the absolute form of the link.
     */
    private static String resolveParentRelative(String link, String[] url_subdomains) {
        String[] relative_subdomains = link.split("/");
        // Count up number of steps to take backwards.  The bounds check fixes an
        // ArrayIndexOutOfBoundsException in the original when the link consisted
        // only of ".." segments (e.g. "../..").
        int steps_back = 0;
        while (steps_back < relative_subdomains.length
                && relative_subdomains[steps_back].equals("..")) {
            steps_back++;
        }

        StringBuilder absolute = new StringBuilder();
        for (int i = 0; i < url_subdomains.length - steps_back; i++) {
            absolute.append(url_subdomains[i]);
            absolute.append("/");
        }
        for (int i = 0; i < relative_subdomains.length; i++) {
            if (relative_subdomains[i].equals("..")) {
                continue;
            }
            absolute.append(relative_subdomains[i]);
            if (i != relative_subdomains.length - 1) {
                absolute.append("/");
            }
        }
        return absolute.toString();
    }

    /**
     * Resolves a plain relative link against the current page's URL.  If the
     * current URL ends in an ".html" file name, that file name is replaced by
     * the link; otherwise the link is appended below the current directory.
     *
     * @param link the relative link.
     * @param url_subdomains the current page URL split on "/".
     * @return the absolute form of the link.
     */
    private static String resolveChildRelative(String link, String[] url_subdomains) {
        StringBuilder absolute = new StringBuilder();
        for (int i = 0; i < url_subdomains.length; i++) {
            boolean last = (i == url_subdomains.length - 1);
            if (last && endsWith(url_subdomains[i], ".html")) {
                // Drop the page file name and substitute the relative link.
                absolute.append("/");
                absolute.append(link);
            } else if (last) {
                absolute.append("/");
                absolute.append(url_subdomains[i]);
                absolute.append("/");
                absolute.append(link);
            } else {
                if (i != 0) {
                    absolute.append("/"); // Don't want / to go at the beginning.
                }
                absolute.append(url_subdomains[i]);
            }
        }
        return absolute.toString();
    }

    /**
     * Tests whether a string ends with the given suffix.
     *
     * @param subject the string to examine.
     * @param ending the candidate suffix.
     * @return true if and only if subject ends with ending.
     */
    public static boolean endsWith(String subject, String ending) {
        // Delegate to the standard library instead of substring arithmetic.
        return subject.endsWith(ending);
    }

    /**
     * get the total number of links found.
     * @return the total number of links.
     */
    public int getTotalLinks() {
        return my_total;
    }

    /**
     * get the list of the links founds.
     * @return a list containing all the links.
     */
    public List<String> getLinks() {
        return my_link_queue;
    }

    /**
     * get the list of the contents.
     * @return one word list per parsed document.
     */
    public List<List<String>> getContentsForAllSources() {
        return my_word_queue;
    }

    /**
     * get the total time it parses all the pages.
     * @return the accumulated parse time in nanoseconds.
     */
    public double getTotalParseTime() {
        return my_total_time;
    }
}