package com.barkerton.crawler.util;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.*;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * Class handles URL normalization (URL canonicalization).
 * It modifies and standardizes URLs in a consistent manner.
 * Normalization is used to determine whether two URLs that differ
 * textually are syntactically equivalent.  It also supports
 * extracting the SITE from a URL, e.g. the URL
 * www.cnn.com/test/stuff/me?youdog belongs to the site cnn.com.
 * 
 * @author CJ_Barker
 * 
 */
public class CrawlerUrl {
	
	// JDK logger - removes the third-party commons-logging dependency
	private static final Logger log = Logger.getLogger(CrawlerUrl.class.getName());
	
	// Scheme -> default port.  A URL that spells out its scheme's default port
	// is equivalent to the same URL without it (see removePort).
	private static final Map<String, Integer> defaultPorts = new HashMap<String, Integer>();
	// Schemes that allow relative references.
	// NOTE(review): not consulted by any rewrite rule in this class - kept as-is
	// in case external code reflects on or later uses it; confirm before removal.
	private static final List<String> relativeSchemes = new ArrayList<String>();
	// Schemes whose authority component names a server.
	// NOTE(review): also unused by the rules below - see note above.
	private static final List<String> serverAuthoritySchemes = new ArrayList<String>();
	// Whitelist of query-variable names that survive normalization; any other
	// variable is treated as arbitrary (tracking/session noise) and dropped.
	private static final List<String> validQueryVariables = new ArrayList<String>();
	
	static {
		defaultPorts.put("http", 80);
		defaultPorts.put("https", 443);
		defaultPorts.put("gopher", 70);
		defaultPorts.put("news", 119);
		defaultPorts.put("snews", 563);
		defaultPorts.put("nntp", 119);
		defaultPorts.put("snntp", 563);
		defaultPorts.put("ftp", 21);
		defaultPorts.put("telnet", 23);
		defaultPorts.put("prospero", 191);
		
		relativeSchemes.add("http");
		relativeSchemes.add("https");
		relativeSchemes.add("news");
		relativeSchemes.add("snews");
		relativeSchemes.add("nntp");
		relativeSchemes.add("snntp");
		relativeSchemes.add("ftp");
		relativeSchemes.add("file");
		
		serverAuthoritySchemes.add("http");
		serverAuthoritySchemes.add("https");
		serverAuthoritySchemes.add("news");
		serverAuthoritySchemes.add("snews");
		serverAuthoritySchemes.add("ftp");
		
		validQueryVariables.add("user");
		validQueryVariables.add("aspxerrorpath");
		validQueryVariables.add("id");
		validQueryVariables.add("closeurl");
		validQueryVariables.add("url");
		validQueryVariables.add("returnurl");
		validQueryVariables.add("siteusername");
		validQueryVariables.add("objectid");
		validQueryVariables.add("type");
		validQueryVariables.add("q");
		validQueryVariables.add("logclick");
		validQueryVariables.add("frompage");
	}
	
	// instance variables
	private String url;				// trimmed/lowercased input with %XX escapes upper-cased
	private String normalizedUrl;	// fully normalized form, or null if the input was malformed
	private String site;			// host of the normalized URL (e.g. "cnn.com"), or null
	
	/**
	 * Private constructor - only allow use with constructor that 
	 * takes a url string parameter.
	 */
	private CrawlerUrl() {}
	
	/**
	 * Constructor that will automatically normalize a URL upon object creation.
	 * Also derives the site (host of the normalized URL) - previously the
	 * {@code site} field was never assigned, so {@link #getSite()} always
	 * returned null despite the class documentation.
	 * 
	 * @param url	URL to normalize; may be malformed (normalizedUrl becomes null)
	 */
	public CrawlerUrl(String url) {
		log.info("URL: " + url);
		this.url = format(url);
		this.normalizedUrl = normalize();
		log.info("Normalized URL: " + normalizedUrl);
		
		// site = host of the normalized URL, e.g. http://cnn.com/a/b -> cnn.com
		if (this.normalizedUrl != null) {
			try {
				this.site = new URL(this.normalizedUrl).getHost();
			}
			catch (MalformedURLException mfue) {
				this.site = null;	// normalize() produced something unparsable
			}
		}
	}
	
	/**
	 * Method formats a URL by capitalizing letters in escape sequences.
	 * E.g. http://example.com/a%c2%b1b becomes http://example.com/a%C2%B1b
	 * 
	 * BUG FIX: the old pattern "%." only covered the first character after the
	 * percent sign, so "%2f" was left as "%2f" instead of "%2F".  Match the
	 * full two-hex-digit escape sequence instead.
	 * 
	 * @param url	URL to be formatted
	 * @return		Formatted URL (trimmed, lowercased, escapes upper-cased)
	 */
	private String format(String url) {
		StringBuilder buffer = new StringBuilder(url.trim().toLowerCase());
		Pattern pattern = Pattern.compile("%[0-9a-fA-F]{2}");
		// match against a snapshot; replacements are same-length, so the
		// matcher's indices remain valid while the buffer is edited in place
		Matcher matcher = pattern.matcher(buffer.toString());
		
		while (matcher.find()) {
			String upper = buffer.substring(matcher.start(), matcher.end()).toUpperCase();
			buffer.replace(matcher.start(), matcher.end(), upper);
		}
		
		return buffer.toString();
	}
	
	/**
	 * Normalizes a URL.  Method wrapper to static normalize.
	 * @return		Normalized result
	 */
	private String normalize() {
		return normalize(this.url);
	}
	
	/**
	 * Normalizes a URL by applying each rewrite rule in turn, re-parsing after
	 * each so every rule sees an up-to-date {@link URL} object.
	 * 
	 * NOTE: the whole URL is lowercased.  Scheme and host are case-insensitive
	 * per RFC 3986; paths strictly are not, but lowercasing them is this
	 * crawler's deliberate equivalence policy (preserved from the original).
	 * 
	 * @param strUrl	URL to be normalized; may be null
	 * @return			Normalized result, or null if the input is null, blank,
	 * 					or malformed (old code threw NullPointerException on null)
	 */
	public static String normalize(String strUrl) {
		if (strUrl == null || strUrl.trim().equals("")) {
			return null;
		}
		
		URL url;
		
		try {
			url = new URL(strUrl.trim().toLowerCase());
			
			if (log.isLoggable(Level.FINE)) {
				log.fine("URL: " + strUrl);
				log.fine("Host:" + url.getHost());
				log.fine("File: " + url.getFile());
				log.fine("Path: " + url.getPath());
				log.fine("Query: " + url.getQuery());
				log.fine("UserInfo: " + url.getUserInfo());
			}
			
			// apply URL rewrite rules, thus normalizing it
			url = new URL(addTrailingSlash(url));
			url = new URL(removeDirectoryIndex(url));
			url = new URL(removePort(url));
			url = new URL(removeFragment(url));
			url = new URL(removeDotSegments(url));
			url = new URL(removeWWW(url));
			url = new URL(sortActiveVariables(url));
			url = new URL(removeArbitraryQueryVariables(url));
			url = new URL(removeQuestionMarkOnEmpty(url));
		} 
		catch (MalformedURLException mfue) {
			return null;
		}
		
		if (log.isLoggable(Level.FINE))
			log.fine("Normalized: " + url);
		
		return url.toString();
	}
	
	/**
	 * Reassembles a URL from its components using the given path and query,
	 * keeping protocol, authority (user info, host, port) and fragment intact.
	 * 
	 * @param url	source URL supplying protocol/authority/fragment
	 * @param path	path component to use (may be empty)
	 * @param query	query component to use, or null for no query
	 * @return		the reassembled URL string
	 */
	private static String rebuild(URL url, String path, String query) {
		StringBuilder buffer = new StringBuilder();
		buffer.append(url.getProtocol()).append("://").append(url.getAuthority()).append(path);
		if (query != null) {
			buffer.append('?').append(query);
		}
		if (url.getRef() != null) {
			buffer.append('#').append(url.getRef());
		}
		return buffer.toString();
	}
	
	/**
	 * Method rewrites URL by adding a trailing slash when URL has no path.
	 * 
	 * BUG FIX: the old code only appended the slash when the URL text contained
	 * a dot (so "http://localhost" was skipped) and blindly appended it at the
	 * very end of the URL - i.e. after any query string.  The URL is rebuilt
	 * with the slash in the correct position instead.
	 * 
	 * @param url	URL to be rewritten
	 * @return		Rewritten URL with trailing slash added, if needed
	 */
	public static String addTrailingSlash(URL url) {
		// see if file path associated with URL - if so no rewrite
		if (url.getPath() != null && !url.getPath().trim().equals("")) {
			return url.toString();
		}
		
		return rebuild(url, "/", url.getQuery());
	}
	
	/**
	 * Method rewrites URL by removing the directory index.
	 * E.g. http://example.com/a/index.html --> http://example.com/a/
	 * 
	 * BUG FIX: the old unanchored pattern deleted "index.xxx"/"default.xxx"
	 * wherever it occurred - mid-path, inside query text, or inside longer
	 * names such as "myindex.html".  Only a whole, final path segment is
	 * stripped now.
	 * 
	 * @param url	URL to be rewritten
	 * @return		Rewritten URL with directory index removed, if present
	 */
	public static String removeDirectoryIndex(URL url) {
		String path = url.getPath();
		
		// see if file path associated with URL - if NOT no rewrite
		if (path == null || path.trim().equals("")) {
			return url.toString();
		}
		
		// the lookbehind requires a preceding '/' so only a complete last
		// segment named index.* or default.* is removed
		String stripped = path.replaceFirst(
				"(?<=/)(?:index|default)\\.(?:html?|asp|jsp|php|cfm|pl|rb|py)$", "");
		if (stripped.equals(path)) {
			return url.toString();
		}
		
		return rebuild(url, stripped, url.getQuery());
	}
	
	/**
	 * Determines if default port exists within URL and removes it.
	 * 
	 * BUG FIX: the old code deleted every ":&lt;port&gt;" substring found
	 * anywhere in the URL text (which could also hit the path or query); the
	 * URL is rebuilt without its port component instead.
	 * 
	 * @param url	URL to examine for default port
	 * @return		Rewritten URL string with the default port removed
	 */
	public static String removePort(URL url) {
		Integer defaultPort = defaultPorts.get(url.getProtocol());
		
		// nothing to do: unknown scheme, no explicit port, or non-default port
		if (defaultPort == null || defaultPort.intValue() != url.getPort()) {
			return url.toString();
		}
		
		StringBuilder buffer = new StringBuilder();
		buffer.append(url.getProtocol()).append("://");
		if (url.getUserInfo() != null) {
			buffer.append(url.getUserInfo()).append('@');
		}
		// getFile() is path plus "?query" when a query is present
		buffer.append(url.getHost()).append(url.getFile());
		if (url.getRef() != null) {
			buffer.append('#').append(url.getRef());
		}
		return buffer.toString();
	}
	
	/**
	 * Removes the fragment component of a URL.
	 * http://www.example.com/bar.html#section1 --> http://www.example.com/bar.html
	 * 
	 * BUG FIX: the old loop kept matching against stale indices after deleting,
	 * which could throw StringIndexOutOfBoundsException when the URL contained
	 * more than one '#'.  Everything from the first '#' is dropped.
	 * 
	 * @param url	URL to be formatted
	 * @return		Rewritten URL string with the fragment removed
	 */
	public static String removeFragment(URL url) {
		String text = url.toString();
		int hash = text.indexOf('#');
		return (hash < 0) ? text : text.substring(0, hash);
	}
	
	/**
	 * Method rewrites URL by resolving dot segments ("./" and "../") in the path.
	 * 
	 * BUG FIX: the old code deleted the literal text "../" and "./", which turns
	 * "/a/b/../c" into "/a/b/c" instead of the correct "/a/c".  The segments are
	 * now resolved with a stack as in RFC 3986 section 5.2.4; ".." above the
	 * root is discarded.
	 * 
	 * @param url	URL to be formatted
	 * @return		Rewritten URL string with dot segments resolved
	 */
	public static String removeDotSegments(URL url) {
		String path = url.getPath();
		if (path == null || path.trim().equals("")) {
			return url.toString();
		}
		
		List<String> segments = new ArrayList<String>();
		for (String segment : path.split("/")) {
			if (segment.equals("") || segment.equals(".")) {
				continue;	// empty segments and "." contribute nothing
			}
			if (segment.equals("..")) {
				// pop the previous segment; ".." above the root is discarded
				if (!segments.isEmpty()) {
					segments.remove(segments.size() - 1);
				}
			} else {
				segments.add(segment);
			}
		}
		
		StringBuilder newPath = new StringBuilder();
		for (String segment : segments) {
			newPath.append('/').append(segment);
		}
		// keep the directory form when the original path ended in a slash or a
		// dot segment, and never emit an empty path once a path existed
		if (newPath.length() == 0
				|| path.endsWith("/") || path.endsWith("/.") || path.endsWith("/..")) {
			newPath.append('/');
		}
		
		if (newPath.toString().equals(path)) {
			return url.toString();	// nothing changed - keep original text
		}
		return rebuild(url, newPath.toString(), url.getQuery());
	}
	
	/**
	 * Method removes the "www" as the first domain label.
	 * 
	 * BUG FIX: the old pattern "//www." left the dot unescaped (matching any
	 * character, e.g. "//wwwX") and was applied repeatedly anywhere in the URL
	 * text, including inside query values.  Now the label is stripped only when
	 * the host actually starts with "www.", and only once at the scheme boundary.
	 * 
	 * @param url	URL to be formatted
	 * @return		Rewritten URL string with the www domain label removed
	 */
	public static String removeWWW(URL url) {
		String host = url.getHost();
		if (host == null || !host.startsWith("www.")) {
			return url.toString();
		}
		// host starts with "www.", so the first "://www." is the authority
		return url.toString().replaceFirst("://www\\.", "://");
	}
	
	/**
	 * Method will determine if it's an active web page that has more than 
	 * one variable in the URL. Will remove the variables with their data, 
	 * sort them into alphabetical order (by variable name), and reassemble 
	 * the URL.  Duplicate names collapse to the last value seen (as before).
	 * 
	 * BUG FIX: the old code compiled the raw query text as a regular expression
	 * to locate it in the URL, which broke on queries containing regex
	 * metacharacters such as '?' or '+'.  The URL is rebuilt from components.
	 * 
	 * @param url	URL to examine for active page variables
	 * @return		Rewritten URL string with sorted query variables
	 */
	public static String sortActiveVariables(URL url) {
		String query = url.getQuery();
		
		// see if query even exists to process
		if (query == null || query.trim().equals("")) {
			return url.toString();
		}
		
		// parse name/value pairs; a TreeMap yields them sorted by name
		SortedMap<String, String> sortedMap = new TreeMap<String, String>();
		for (String pair : query.split("&")) {
			String[] nv = pair.split("=", 2);	// limit 2: values may contain '='
			sortedMap.put(nv[0], (nv.length == 2) ? nv[1] : "");
		}
		
		// re-apply sorted variables as the query
		StringBuilder newQuery = new StringBuilder();
		for (Map.Entry<String, String> entry : sortedMap.entrySet()) {
			if (newQuery.length() > 0) {
				newQuery.append('&');
			}
			newQuery.append(entry.getKey()).append('=').append(entry.getValue());
		}
		
		return rebuild(url, url.getPath(), newQuery.toString());
	}
	
	/**
	 * Method will remove arbitrary query variables, keeping only the names in
	 * {@code validQueryVariables} and preserving their order of appearance.
	 * 
	 * BUG FIXES relative to the old code: (a) value-less variables were kept
	 * regardless of the whitelist; (b) the surviving variables were rebuilt
	 * from a HashMap, so the result order was nondeterministic across JVM runs
	 * (defeating normalization); (c) the raw query text was compiled as a
	 * regular expression, breaking on metacharacters.
	 * 
	 * @param url	URL to examine for arbitrary variables
	 * @return		Rewritten URL string with no arbitrary variables
	 */
	public static String removeArbitraryQueryVariables(URL url) {
		String query = url.getQuery();
		
		// see if query even exists to process
		if (query == null || query.trim().equals("")) {
			return url.toString();
		}
		
		StringBuilder newQuery = new StringBuilder();
		for (String pair : query.split("&")) {
			String[] nv = pair.split("=", 2);
			
			// drop invalid arbitrary query variables
			if (!validQueryVariables.contains(nv[0].toLowerCase())) {
				continue;
			}
			
			if (newQuery.length() > 0) {
				newQuery.append('&');
			}
			if (nv.length == 1 || nv[1].trim().equals("")) {
				newQuery.append(nv[0]);	// value-less variable: emit the bare name
			} else {
				newQuery.append(nv[0]).append('=').append(nv[1]);
			}
		}
		
		// drop the '?' entirely when nothing survived the whitelist
		String remaining = (newQuery.length() == 0) ? null : newQuery.toString();
		return rebuild(url, url.getPath(), remaining);
	}
	
	/**
	 * Method will determine if URL ends in ? and remove it.
	 * 
	 * @param url	URL to examine for ?
	 * @return		Rewritten URL string without ending ?
	 */
	public static String removeQuestionMarkOnEmpty(URL url) {
		String text = url.toString();
		return text.endsWith("?") ? text.substring(0, text.length() - 1) : text;
	}
	
	// Accessor-Getter methods below
	public String getUrl() {
		return this.url;
	}
	
	public String getNormalizedUrl() {
		return this.normalizedUrl;
	}
	
	public String getSite() {
		return this.site;
	}
	
	/*
	 * Simple sanity "smoke" test
	 */
	public static void main(String[] args) {
		CrawlerUrl url = new CrawlerUrl("http://www.example.com:80/../a/b/../c/./d.htm#stuff%3a");
		System.out.println(url.getNormalizedUrl() + " site=" + url.getSite());
	}
}

