

package MultiThreaded;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import MultiThreaded.MultiThreadSetup;
 import singleThreaded.PageParserSingle;
/**
 * Parses the content of a given URL. Keeps a set of words parsed (for the analyzer)
 * and a set of URLs parsed (for the page retriever).
 * @author Aaron Kaufman
 */
public class PageParser implements Runnable {

    /** Matches an anchor tag and captures its href value in group 1. */
    private static final String RELATIVE_REGEX = "<a[^>]+href\\s*=\\s*[\"']([^'\"> ]*)\\s*['\"]";

    /** Compiled once; the original recompiled the pattern on every page parsed. */
    private static final Pattern LINK_PATTERN = Pattern.compile(RELATIVE_REGEX);

    /** True while this parser is blocked waiting on my_url_queue (read by other threads). */
    private volatile boolean is_waiting = false;
    /** Whether a page limit applies; this constructor always enables it. */
    private boolean page_limited = false;
    private final BlockingQueue<String> my_link_queue;          // links for the retriever to fetch
    private final BlockingQueue<List<String>> my_word_queue;    // word lists for the analyzer
    private final BlockingQueue<URL> my_url_queue;              // URLs supplied by the retriever
    private final BlockingQueue<List<String>> my_embedded_urls; // per-page link lists
    private final Set<String> my_visited_urls;                  // pages already parsed, to skip duplicates
    private final int my_page_limit;                            // stop after this many pages
    private int my_pages_parsed = 0;

    /**
     * Constructor for the parser that allows it to parse only a certain number of pages.
     * @param the_queue  The queue of links that will be sent to the page retriever for URL retrieval.
     * @param the_word_queue The queue of words that will be sent to the page analyzer for analysis.
     * @param the_url_queue The queue of URLs given by the page retriever to be parsed.
     * @param the_embedded_urls The queue receiving each parsed page's list of absolute links.
     * @param the_page_limit The number of pages that the parser will go before stopping.
     */
    public PageParser(BlockingQueue<String> the_queue,  BlockingQueue<List<String>> the_word_queue,
            BlockingQueue<URL> the_url_queue, BlockingQueue<List<String>> the_embedded_urls, int the_page_limit)
    {
        my_visited_urls = new HashSet<String>();
        page_limited = true;
        my_word_queue = the_word_queue;
        my_link_queue = the_queue;
        my_page_limit = the_page_limit;
        my_url_queue = the_url_queue;
        my_embedded_urls = the_embedded_urls;
    }

    /**
     * Parses one page in my_url_queue, removing it.
     * It then gets all the links on the page, converts them to absolute URLs,
     * and places them in my_link_queue to be read by the PageRetriever.
     * Finally, it reads all the words on the page (delimited by punctuation and
     * brackets and spaces) and places them in my_word_queue for the PageAnalyzer.
     */
    public void parse()
    {
        URL page;
        try
        {
            is_waiting = true; // Block until the retriever supplies a URL.
            page = my_url_queue.take();
            is_waiting = false;
        } catch (InterruptedException e1)
        {
            // Restore the interrupt flag so the owning thread can observe it;
            // the original swallowed the interruption entirely.
            Thread.currentThread().interrupt();
            e1.printStackTrace();
            return;
        }

        if (my_visited_urls.contains(page.toString())) {
            return; // Don't want to parse previously-parsed pages.
        }

        StringBuilder buffer = new StringBuilder();
        // try-with-resources: the original never closed the reader, leaking a
        // stream per page. NOTE(review): this reads with the platform default
        // charset; the page's declared encoding is not honored — confirm intent.
        try (BufferedReader br = new BufferedReader(new InputStreamReader(page.openStream())))
        {
            String strTemp;
            while (null != (strTemp = br.readLine())) {
                buffer.append(strTemp);
            }
        } catch (IOException e)
        {
            // Best effort: an unreadable page falls through with empty html.
            e.printStackTrace();
        }
        String html = buffer.toString();

        // Collect every href on the page (relative or absolute).
        List<String> links = new ArrayList<String>();
        Matcher m = LINK_PATTERN.matcher(html);
        while (m.find()) {
            links.add(m.group(1));
        }

        my_visited_urls.add(page.toString());

        String[] url_subdomains = page.toString().split("/");
        List<String> modified_list = makeAbsolute(links, url_subdomains);

        my_link_queue.addAll(modified_list);

        try
        {
            List<String> valid_words = getWordList(html);

            my_word_queue.put(valid_words);
            my_embedded_urls.put(modified_list);
        } catch (InterruptedException e)
        {
            // Preserve the interrupt status for the thread's owner.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }

    /**
     * Converts each link on the page into an absolute URL, using the current
     * page's path segments as the base. Links to images (.png/.jpg/.gif) are dropped.
     * @param links raw href values scraped from the page.
     * @param url_subdomains the current page's URL split on "/".
     * @return the list of absolute URLs.
     */
    private List<String> makeAbsolute(List<String> links, String[] url_subdomains)
    {
        ArrayList<String> modified_list = new ArrayList<String>();
        for (String s : links)
        {
            if (s.length() >= 2 && s.substring(0, 2).equals("..")) // Step backwards before going further.
            {
                StringBuilder absolute_version = new StringBuilder();
                // Count up the number of steps to take backwards.
                String[] relative_subdomains = s.split("/");
                int steps_back = 0;
                while (relative_subdomains[steps_back].equals("..")) {
                    steps_back++;
                }

                // Keep the base URL minus the segments we stepped back over.
                for (int i = 0; i < url_subdomains.length - steps_back; i++)
                {
                    absolute_version.append(url_subdomains[i]);
                    absolute_version.append("/");
                }
                // Append the remainder of the relative path.
                for (int i = 0; i < relative_subdomains.length; i++)
                {
                    if (relative_subdomains[i].equals("..")) {
                        continue;
                    }
                    absolute_version.append(relative_subdomains[i]);
                    if (i != relative_subdomains.length - 1) {
                        absolute_version.append("/");
                    }
                }
                modified_list.add(absolute_version.toString());
            }
            else if (s.length() >= 5 && (s.substring(0, 5).equals("http:") || s.substring(0, 5).equals("https"))) // Absolute URL, good as-is.
            {
                modified_list.add(s);
            }
            else // Otherwise, we're facing a relative url off our current branch.
            {
                StringBuilder absolute_version = new StringBuilder();
                for (int i = 0; i < url_subdomains.length; i++)
                {
                    // If the base URL ends in a .html page, replace that page with the link;
                    // otherwise treat the last segment as a directory and descend into it.
                    if (i == url_subdomains.length - 1
                            && url_subdomains[i].length() >= 5
                            && ".html".equals(url_subdomains[i].substring(url_subdomains[i].length() - 5, url_subdomains[i].length()))) {

                        absolute_version.append("/");
                        absolute_version.append(s);
                    }
                    else if (i == url_subdomains.length - 1) {
                        absolute_version.append("/");
                        absolute_version.append(url_subdomains[i]);
                        absolute_version.append("/");
                        absolute_version.append(s);
                    }
                    else {
                        if (i != 0)
                            absolute_version.append("/"); // Don't want / to go at the beginning.
                        absolute_version.append(url_subdomains[i]);
                    }
                }
                // Do not add to list if the retrieved page is just a picture.
                if (!PageParserSingle.endsWith(absolute_version.toString(), ".png")
                        && !PageParserSingle.endsWith(absolute_version.toString(), ".jpg")
                        && !PageParserSingle.endsWith(absolute_version.toString(), ".gif"))
                {
                    modified_list.add(absolute_version.toString());
                }
            }
        }
        return modified_list;
    }

    @Override
    /**
     * Runs the page parser until the page limit is reached, then poisons the
     * downstream queues and records the average per-page parse time.
     */
    public void run()
    {
        double start = System.nanoTime();
        while (!getIsFinished())
        {
            parse();
            // NOTE(review): this also counts pages skipped as duplicates or
            // aborted by interruption — confirm that is the intended metric.
            my_pages_parsed++;
        }
        double end = System.nanoTime();
        double total_time = end - start;

        // Poison pill: tells the analyzer and GUI consumers to shut down.
        List<String> poison = new ArrayList<String>();
        poison.add("POISON");
        my_word_queue.add(poison);
        my_embedded_urls.add(poison);

        total_time = (total_time / Math.pow(10, 9)); // ns -> s
        MultiThreadSetup.avg_parse_time = (total_time / (double) MultiThreadSetup.page_counter);
    }

    /**
     * Gets a word list sans HTML tags and sans javascript.
     * @param html The html.
     * @return A list of words in the html not related to tags or javascript.
     */
    public static List<String> getWordList(String html)
    {
        // Strip <script>...</script> and <style>...</style> bodies wholesale;
        // their contents are code, not prose.
        String html_sans_javascript = stripElement(html, "<script", "</script>");
        html_sans_javascript = stripElement(html_sans_javascript, "<style", "</style>");

        // Drop everything between '<' and '>', substituting a space for each tag
        // so words on either side of a tag don't fuse together.
        // StringBuilder here: the original's String += in a per-character loop was O(n^2).
        StringBuilder html_sans_tags = new StringBuilder();
        boolean remove = false;
        for (char c : html_sans_javascript.toCharArray()) {
            if (c == '<') {
                remove = true;
                html_sans_tags.append(" ");
            }
            if (!remove) {
                html_sans_tags.append(c);
            }
            if (c == '>') {
                remove = false;
                html_sans_tags.append(" ");
            }
        }

        List<String> words = Arrays.asList(html_sans_tags.toString().split("[ .,!?\\\"\\'\\\\]"));
        List<String> valid_words = new ArrayList<String>();
        for (String word : words) {
            if (word.trim().equals(""))
            {
                continue;
            }
            valid_words.add(word);
        }
        return valid_words;
    }

    /**
     * Removes every occurrence of an element delimited by the given open/close
     * markers. If a close marker is missing, truncates at the open marker — the
     * original code computed indexOf(close) + close.length() without checking
     * for -1 and could loop forever on such malformed input.
     * @param html the text to strip.
     * @param open the opening marker (e.g. "&lt;script").
     * @param close the closing marker (e.g. "&lt;/script&gt;").
     * @return the text with all delimited elements removed.
     */
    private static String stripElement(String html, String open, String close)
    {
        int start;
        while ((start = html.indexOf(open)) != -1)
        {
            int end = html.indexOf(close, start);
            if (end == -1) {
                return html.substring(0, start); // unterminated element: drop the tail
            }
            html = html.substring(0, start) + html.substring(end + close.length());
        }
        return html;
    }

    /**
     * Returns whether or not the parser is waiting for someone to add something to its queue.
     * @return true while blocked on my_url_queue.
     */
    public boolean getIsWaiting() {
        return is_waiting;
    }

    /**
     * Checks to see if it's reached the page limit.
     * @return true once my_page_limit pages have been processed.
     */
    public boolean getIsFinished() // We've reached the page limit.
    {
        return my_page_limit <= my_pages_parsed && page_limited;
    }
}