
package hyperlinks.crawler;

import java.awt.TextArea;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashSet;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 *
 * @author madhu
 * date : April 7, 2012
 */


/*
 * The FetchPage class is designed to fetch and store the fetched data of the home page
 * (only home pages) of a website in "Elements" objects (media, links, imports).
 * Objects:
 * links :stores all hyperlinks of the html page which is downloaded by the "Jsoup.connect()" 
 *        statement. (Ex: <a href="http://xx.html">)
 * media:stores all image sources of the html page which is downloaded by the "Jsoup.connect()" 
 *        statement. (Ex: <img src="xx.jpg" width= height=>)
 * imports:stores all imports (.css, .js files) of the html page which is downloaded by the "Jsoup.connect()" 
 *         statement. (Ex: <link rel="" src="./*.css">)
 * Another object, "otherurls" of type ArrayList, stores the urls that refer to
 * pages outside the present website. (Ex: a url in www.abc.com referring to www.xyz.com)
 * 
 */   
public class FetchPage {

    URL url;
    Document file;
    Elements media;
    Elements links, imports;
    ArrayList<String> otherurls = new ArrayList<String>();
    URL newurl;

    /**
     * Downloads the page at {@code url} with Jsoup and caches its img, a and
     * link elements for later inspection.
     * <p>
     * On connection failure a diagnostic message is printed and the Elements
     * fields remain {@code null}; the accessor methods guard against that so
     * the crawler can continue with the next URL.
     *
     * @param url the home page to fetch
     */
    public FetchPage(URL url) {
        this.url = url;
        try {
            this.file = Jsoup.connect(url.toString()).userAgent("mozilla").get();
            this.media = file.getElementsByTag("img");
            this.links = file.getElementsByTag("a");
            this.imports = file.getElementsByTag("link");
        } catch (IOException ex) {
            // Report-and-continue by design: a dead site must not stop the crawl.
            String err = "Webcrawler cannot establish connection to " + url + " \n";
            err += "Problem may be one of the following:\n";
            err += "No Internet connection... (or)\nConnection is timed out (or) \n";
            err += "server is not responding";
            System.out.println(err);
        }
    }

    /**
     * Returns all image elements found on the fetched page, or {@code null}
     * if the page could not be fetched.
     *
     * @return the img elements of the page
     */
    public Elements mediaLinks() {
        return media;
    }

    /**
     * Collects the home-page URL (protocol + "://" + host) of every absolute
     * link on the page and returns them with duplicates removed.
     * <p>
     * Relative links and fragments are skipped: only hrefs starting with
     * "http" identify another site. (Bug fix: the original condition
     * {@code !href.startsWith("http") || href.startsWith("https")} also
     * discarded every https link, contradicting the http(s) intent.)
     *
     * @return deduplicated list of "protocol://host" strings
     * @throws MalformedURLException declared for backward compatibility;
     *         malformed hrefs are now reported and skipped individually
     */
    public ArrayList<String> otherUrls() throws MalformedURLException {
        if (links != null) {
            for (Element eachurl : links) {
                String href = eachurl.attr("href");

                // Skip anything that is not an absolute http/https URL and
                // move on to the next link.
                if (!href.startsWith("http")) {
                    continue;
                }

                try {
                    newurl = new URL(href);
                    // Keep only protocol + "//" + sitename — the home page.
                    String newone = newurl.getProtocol() + "://" + newurl.getHost();
                    otherurls.add(newone);
                } catch (MalformedURLException bad) {
                    // One bad href must not abort the whole scan (the original
                    // broad catch around the loop stopped at the first failure).
                    System.out.println("exception occured");
                }
            }
        }
        removeDuplicates(otherurls); // eliminate duplicate home-page URLs
        return otherurls;
    }

    /**
     * Removes duplicate entries by round-tripping through a HashSet (which
     * rejects duplicates), then refilling {@code otherurls}.
     * <p>
     * NOTE(review): this mutates the {@code otherurls} field regardless of the
     * argument passed; the raw-typed parameter is kept for backward
     * compatibility with existing callers.
     *
     * @param arlist list whose elements are deduplicated into otherurls
     */
    public void removeDuplicates(ArrayList arlist) {
        HashSet<String> hs = new HashSet<String>(arlist);
        otherurls.clear();
        otherurls.addAll(hs);
    }

    /**
     * Returns every href on the page — sublinks of this website as well as
     * external links — excluding in-page anchors ("#...") and
     * "javascript:" pseudo-links.
     *
     * @return all usable href values found on the page
     */
    public ArrayList<String> hrefLinks() {
        ArrayList<String> all_links = new ArrayList<String>();
        if (links != null) {
            for (Element link : links) {
                String src = link.attr("href");
                // Drop blank/unnecessary links: fragments and javascript handlers.
                if (src.startsWith("#") || src.startsWith("javascript")) {
                    continue;
                }
                all_links.add(src);
            }
        }
        return all_links;
    }

    /**
     * Returns all imported-resource elements (css, js, etc.) of the page, or
     * {@code null} if the page could not be fetched.
     *
     * @return the link elements of the page
     */
    public Elements importLinks() {
        return imports;
    }
}
       
