package org.seaurchin.crawler;

// File Name: WebCrawler.java
//Tao Chen 

import java.text.*;
import java.util.*;
import java.net.*;
import java.io.*;
import org.seaurchin.database.*;

/**
 * A simple single-threaded web crawler.
 *
 * <p>It reads seed URLs from {@code c:/Seachurin/conf/FeedList.txt}, and for each site:
 * fetches and parses {@code siteinfo.xml} (storing the parsed metadata via {@code persist}),
 * downloads {@code index.html}, saves the page body to
 * {@code c:/Seachurin/crawldb/doc<n>.txt}, appends a tab-separated line
 * (doc file, page URL, category) to {@code c:/Seachurin/crawldb/index.txt},
 * and queues every robot-safe http link found on the page.
 */
public class WebCrawler {
    public static final String SEARCH = "Search";
    public static final String STOP = "Stop";
    public static final String DISALLOW = "Disallow:";
    public static final int SEARCH_LIMIT = 20000;

    // Crawl state (raw Vectors kept for interface compatibility with the rest
    // of the project): URLs still to visit, URLs already visited, and URLs
    // whose content type matched text/html or application/pdf.
    Vector vectorToSearch = new Vector();
    Vector vectorSearched = new Vector();
    Vector vectorMatches = new Vector();

    // ***************** main() ********************************

    /**
     * Entry point: installs the HTTP proxy system properties (edit host/port
     * when running behind a firewall) and runs a single crawl.
     */
    public static void main(String argv[]) {
        // Behind a firewall set your proxy and port here!
        Properties props = new Properties(System.getProperties());
        props.put("http.proxySet", "true");
        props.put("http.proxyHost", "webcache-cup");
        props.put("http.proxyPort", "8080");
        System.setProperties(new Properties(props));

        new WebCrawler().crawl();
    }

    // ************************  crawl()  **************************

    /**
     * Runs the crawl: seeds the queue from FeedList.txt, then processes the
     * queue until it is empty or SEARCH_LIMIT pages have been handled.
     *
     * <p>Fixes over the original version: a failed URL is skipped
     * ({@code continue}) instead of aborting the whole crawl ({@code break});
     * the index writer is closed once after the loop (it used to be closed
     * inside the loop, silently losing every subsequent index line); each URL
     * is dequeued immediately so failures cannot leave it in the queue; and
     * pages counted once, not twice.
     */
    public void crawl() {
        // --- Open the seed list. Bail out instead of NPE-ing on a null reader.
        File file = new File("c:/Seachurin/conf/FeedList.txt");
        BufferedReader in;
        try {
            in = new BufferedReader(new FileReader(file));
        } catch (FileNotFoundException e) {
            System.out.println("File Disappeared!");
            return;
        }

        // --- Open the crawl index (one tab-separated line per fetched page).
        FileWriter fw1;
        try {
            fw1 = new FileWriter(new File("c:/Seachurin/crawldb/index.txt"));
        } catch (IOException e) {
            System.out.println("Can not open stream fw");
            try {
                in.close();
            } catch (IOException ignored) {
                // best effort; nothing more to do on shutdown
            }
            return;
        }
        PrintWriter pw1 = new PrintWriter(fw1);

        // initialize search data structures
        vectorToSearch.removeAllElements();
        vectorSearched.removeAllElements();
        vectorMatches.removeAllElements();

        // set default for URL access
        URLConnection.setDefaultAllowUserInteraction(false);

        int numberSearched = 0;
        int numberFound = 1; // doc file numbering starts at 1

        // --- Seed the queue, skipping blank lines.
        try {
            String line;
            while ((line = in.readLine()) != null) {
                line = line.trim();
                if (line.length() > 0)
                    vectorToSearch.addElement(line);
            }
        } catch (IOException e) {
            System.out.println(" in.readLine() errors!");
        } finally {
            try {
                in.close();
            } catch (IOException ignored) {
                // reader already drained; close failure is harmless here
            }
        }

        while (vectorToSearch.size() > 0) {
            // Dequeue first so no failure path can leave the URL in the queue.
            String strURL = (String) vectorToSearch.elementAt(0);
            vectorToSearch.removeElementAt(0);

            System.out.println("searching " + strURL);

            String str1 = strURL.concat("/siteinfo.xml");
            String str2 = strURL.concat("/index.html");

            // A site without a readable siteinfo.xml is skipped, not fatal.
            String category = checkSiteInfo(str1);
            if (category == null)
                continue;

            URL url;
            try {
                url = new URL(str2);
            } catch (MalformedURLException e) {
                System.out.println("ERROR: invalid URL " + strURL);
                continue;
            }

            // mark the URL as searched (we want this one way or the other)
            vectorSearched.addElement(str2);

            // can only search http: protocol URLs
            if (!url.getProtocol().equals("http"))
                continue;

            // test to make sure it is robot-safe before searching
            if (!robotSafe(url))
                continue;

            try {
                // Open ONE connection and reuse it for both the content type
                // and the body (the original opened a second connection via
                // url.openStream()).
                URLConnection urlConnection = url.openConnection();
                urlConnection.setAllowUserInteraction(false);

                String type = urlConnection.getContentType();
                System.out.println(" File Type is: " + type);
                if (type == null)
                    continue;

                String content = readAll(urlConnection.getInputStream());

                // Persist the page body to its own doc file.
                String fileName = "c:/Seachurin/crawldb/doc" + numberFound + ".txt";
                try {
                    PrintWriter pw = new PrintWriter(new FileWriter(new File(fileName)));
                    pw.println(content);
                    pw.close(); // flush and release the file handle
                } catch (IOException e) {
                    System.out.println("Can not open stream fw");
                }

                System.out.println(content);

                // Append the index line. NOTE: the original closed fw1 here,
                // which made every later pw1.print() a silent no-op.
                pw1.print(fileName + "\t");
                pw1.print(str2 + "\t");
                pw1.print(category);
                pw1.println();
                pw1.flush();

                // Scan the page for anchors and queue any new http links.
                String lowerCaseContent = content.toLowerCase();
                int index = 0;
                while ((index = lowerCaseContent.indexOf("<a", index)) != -1) {
                    if ((index = lowerCaseContent.indexOf("href", index)) == -1)
                        break;
                    if ((index = lowerCaseContent.indexOf("=", index)) == -1)
                        break;

                    index++;
                    String remaining = content.substring(index);
                    StringTokenizer st = new StringTokenizer(remaining, "\t\n\r\">#");
                    if (!st.hasMoreTokens())
                        break; // nothing after the '=' — malformed tail
                    String strLink = st.nextToken();

                    URL urlLink;
                    try {
                        urlLink = new URL(url, strLink); // resolve relative links
                        strLink = urlLink.toString();
                    } catch (MalformedURLException e) {
                        System.out.println("ERROR: bad URL " + strLink);
                        continue;
                    }

                    // only look at http links
                    if (!urlLink.getProtocol().equals("http"))
                        continue;

                    try {
                        InputStream linkStream = urlLink.openStream();
                        // static method — call it on the class, not an instance
                        String strType = URLConnection.guessContentTypeFromStream(linkStream);
                        linkStream.close();

                        if (strType == null)
                            continue;

                        if (strType.equals("text/html") || strType.equals("application/pdf")) {
                            // queue the link unless it was already seen or queued
                            if (!vectorSearched.contains(strLink)
                                    && !vectorToSearch.contains(strLink)) {
                                // test to make sure it is robot-safe!
                                if (robotSafe(urlLink))
                                    vectorToSearch.addElement(strLink);
                            }
                            // record the match unless we have already seen it
                            if (!vectorMatches.contains(strLink)) {
                                vectorMatches.addElement(strLink);
                                numberFound++;
                                if (numberFound >= SEARCH_LIMIT)
                                    break;
                            }
                        }
                    } catch (IOException e) {
                        System.out.println("ERROR: couldn't open URL " + strLink);
                        continue;
                    }
                }
            } catch (IOException e) {
                System.out.println("ERROR: couldn't open URL " + strURL);
                continue;
            }

            // Count the page ONCE (the original incremented twice per page).
            numberSearched++;
            if (numberSearched >= SEARCH_LIMIT)
                break;
        }

        pw1.close(); // close the index exactly once, after the crawl

        if (numberSearched >= SEARCH_LIMIT || numberFound >= SEARCH_LIMIT)
            System.out.println("reached search limit of " + SEARCH_LIMIT);
        else
            System.out.println("done");
    }

    // ***********************   robotSafe(URL)   *********************************

    /**
     * Checks the host's /robots.txt for a "Disallow:" path that prefixes the
     * given URL's path.
     *
     * @param url the URL about to be fetched
     * @return false when a Disallow rule matches (or the robots URL is
     *         malformed); true when no rule matches or robots.txt is absent
     */
    public boolean robotSafe(URL url) {
        // form URL of the robots.txt file
        String strRobot = "http://" + url.getHost() + "/robots.txt";
        URL urlRobot;
        try {
            urlRobot = new URL(strRobot);
        } catch (MalformedURLException e) {
            // something weird is happening, so don't trust it
            return false;
        }

        String strCommands;
        try {
            strCommands = readAll(urlRobot.openStream());
        } catch (IOException e) {
            // if there is no robots.txt file, it is OK to search
            return true;
        }

        // Assume this robots.txt refers to us and search for "Disallow:" commands.
        String strURL = url.getFile();
        int index = 0;
        while ((index = strCommands.indexOf(DISALLOW, index)) != -1) {
            index += DISALLOW.length();
            StringTokenizer st = new StringTokenizer(strCommands.substring(index));

            if (!st.hasMoreTokens())
                break; // "Disallow:" with nothing after it

            String strBadPath = st.nextToken();

            // if the URL starts with a disallowed path, it is not safe
            if (strURL.indexOf(strBadPath) == 0)
                return false;
        }

        return true;
    }

    // ******************** String checkSiteInfo(String) *********************************

    /**
     * Fetches and hand-parses {@code siteinfo.xml} from the given URL, stores
     * the site, publisher and category records through {@code persist}, and
     * returns the site's category.
     *
     * @param st1 full URL of the siteinfo.xml document
     * @return the parsed {@code <Category>} value, or null when the document
     *         is unreachable, not {@code application/xml}, or unparseable
     */
    public String checkSiteInfo(String st1) {
        String content;
        try {
            URL u = new URL(st1);
            URLConnection uc = u.openConnection();

            String contentType = uc.getContentType();
            System.out.println("Content Type is: " + contentType);

            // guard against a null content type (the original NPE'd here)
            if (contentType == null || !contentType.equals("application/xml"))
                return null;

            content = readAll(uc.getInputStream());
        } catch (IOException e) {
            // covers MalformedURLException too; the original fell through
            // with content == null and NPE'd below
            System.out.println("Can not open the URLSTREAM");
            return null;
        }

        System.out.println(content);

        // Parse the leading <Name> element by hand (no XML parser is used in
        // this project). NOTE(review): this is naive string matching — it will
        // mis-parse comments or CDATA; confirm siteinfo.xml is always simple.
        int index1 = content.indexOf("<Name>");
        int index3 = content.indexOf("</Name>");
        if (index1 == -1 || index3 == -1 || index3 < index1) {
            System.out.println("siteinfo.xml has no <Name> element");
            return null;
        }
        String name = content.substring(index1 + "<Name>".length(), index3);
        System.out.println("The Name is: " + name);

        // Everything after </Name> holds the remaining elements.
        String st = content.substring(index3 + "</Name>".length());

        // Site info
        String description = searchString(st, "Description");
        String service_type = searchString(st, "ServiceType");
        String service_url = searchString(st, "ServiceUrl");
        String access_info = searchString(st, "AccessInfo");

        // Publisher info
        String contact_name = searchString(st, "ContactName");
        String title = searchString(st, "Title");
        String affiliation = searchString(st, "Affiliation");
        String phone = searchString(st, "Phone");
        String email = searchString(st, "Email");

        // Category info
        String categoryname = searchString(st, "Name");
        String category = searchString(st, "Category");
        String taxonomyschema = searchString(st, "TaxonomySchema");

        // Update the database with the parsed siteinfo.xml.
        persist.BeginTrans();
        SiteInfo bob = new SiteInfo(name, description, service_type, service_url, access_info);
        persist.StoreSiteInfo(bob);
        PublisherInfo bob2 = new PublisherInfo(contact_name, title, affiliation, phone, email);
        persist.StorePublisherInfo(bob2);
        SiteCategories bob3 = new SiteCategories(categoryname, category, taxonomyschema);
        persist.StoreSiteCategories(bob3);
        persist.CommitTrans();

        return category;
    }

    // ************************ searchString() ************

    /**
     * Returns the text between {@code <subStr>} and {@code </subStr>} in st,
     * or null when the element is missing (the original version threw
     * StringIndexOutOfBoundsException in that case).
     */
    private String searchString(String st, String subStr) {
        int open = st.indexOf("<" + subStr + ">");
        int close = st.indexOf("</" + subStr + ">");
        if (open == -1 || close == -1 || close < open)
            return null;

        String value = st.substring(open + subStr.length() + 2, close);
        System.out.println("The element is: " + value);
        return value;
    }

    // ************************ readAll() ************

    /**
     * Drains a stream into a String (platform default charset, matching the
     * original behavior) and always closes it. Decoding the accumulated bytes
     * in one pass avoids the original's multibyte-character splitting and its
     * crash on an immediately-empty stream (read() returning -1 on the first
     * call produced new String(b, 0, -1)).
     */
    private static String readAll(InputStream stream) throws IOException {
        try {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            byte[] b = new byte[1000];
            int numRead;
            while ((numRead = stream.read(b)) != -1) {
                buf.write(b, 0, numRead);
            }
            return buf.toString();
        } finally {
            stream.close();
        }
    }
} // end of the class WebCrawler

