package com.barkerton.crawler.parser;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * Parses HTML source text for the crawler: extracts absolute links (http,
 * ftp, ...), extracts the page title, and strips markup tags, filtering out
 * links to non-crawlable assets (images, css, js).
 * 
 * @author c.barker
 *
 */
public class Parser {

	private static final Log log = LogFactory.getLog(Parser.class);

	// Asset/stylesheet extensions that are never worth crawling.
	private static final List<String> INVALID_LINK_EXT =
			Arrays.asList("gif", "css", "jpeg", "jpg", "js", "dtd");

	// Patterns are compiled once: Pattern.compile is expensive and these never change.
	private static final Pattern URL_PATTERN = Pattern.compile(
			"(http[s]?|ftp|gopher|[s]news|nntp|ssntp|telnet|prospero)://[a-zA-Z0-9._-]+(:[0-9]+)?(/?[@?=+&!*$%]?[a-zA-Z0-9._-]*)*");

	// Case-insensitive <title ...>...</title>. The original regex was missing the
	// '<' before the closing tag, so stray text such as "x /title>" could match
	// as a title; it also could not match <TITLE> without lowercasing the input.
	private static final Pattern TITLE_PATTERN = Pattern.compile(
			"<\\s*title[^>]*>(.*?)<\\s*/\\s*title\\s*>",
			Pattern.CASE_INSENSITIVE | Pattern.DOTALL);

	/**
	 * Parses out all absolute links aka anchors from the HTML source.
	 * Duplicates and links to non-crawlable assets (see INVALID_LINK_EXT)
	 * are dropped; order of first appearance is preserved.
	 *
	 * @param source	HTML source; must not be null
	 * @return Array of URL objects, or null when no valid link was found
	 *         (null-on-empty kept for existing callers)
	 */
	public static Object[] extractLinks(String source) {
		List<URL> links = new ArrayList<URL>();
		// Dedupe on the string form: URL.equals()/hashCode() may perform a
		// blocking DNS lookup, so URL must never be used as a lookup key.
		Set<String> seen = new LinkedHashSet<String>();

		// Lowercase so the scheme alternation matches "HTTP://..."-style markup.
		String buffer = source.trim().toLowerCase();
		Matcher matcher = URL_PATTERN.matcher(buffer);

		log.debug("Extracting links from HTML...");

		while (matcher.find()) {
			String url = matcher.group();
			try {
				log.debug(url);
				if (seen.add(url)) {
					URL tempUrl = new URL(url);
					if (isValidLink(tempUrl))
						links.add(tempUrl);
				}
			}
			catch (MalformedURLException mue) {
				log.error("Malformed URL: " + url);
			}
		}

		if (links.isEmpty())
			return null;
		return links.toArray();
	}


	/**
	 * Filters out links whose file extension marks them as non-crawlable
	 * assets like gif, css, jpg.
	 *
	 * @param url	URL to validate
	 * @return		Boolean indicating whether or not we want to use this extracted link.
	 */
	private static boolean isValidLink(URL url) {
		String file = url.getFile();
		// Strip any query string so "style.css?v=2" is still recognized as css.
		int query = file.indexOf('?');
		if (query >= 0)
			file = file.substring(0, query);
		int dot = file.lastIndexOf('.');
		// No extension at all (e.g. "/" or "/path") -> crawlable. The original
		// substring(lastIndexOf('.') + 1) silently treated the whole path as
		// the "extension" when no dot was present.
		if (dot < 0)
			return true;
		String extension = file.substring(dot + 1);
		return !INVALID_LINK_EXT.contains(extension);
	}

	/**
	 * Parses out the HTML page's title.
	 *
	 * @param source	HTML source; must not be null
	 * @return String title of the page (last title element wins, inner tags
	 *         removed, surrounding whitespace trimmed, original casing
	 *         preserved), or null when the page has no title
	 */
	public static String extractTitle(String source) {
		String title = null;

		// Match the original-case source (pattern is CASE_INSENSITIVE): the old
		// code lowercased the whole buffer and therefore lowercased every title
		// it returned, destroying the page's capitalization.
		Matcher matcher = TITLE_PATTERN.matcher(source.trim());

		while (matcher.find()) {
			// group(1) is the element content; strip any nested markup from it.
			title = eliminateTags(matcher.group(1)).trim();
			log.debug("Page Title: " + title);
		}

		return title;
	}

	/**
	 * Removes all tags (HTML) from source text.
	 *
	 * @param source	HTML source to have tags removed from
	 * @return			Plain text without HTML tags
	 */
	public static String eliminateTags(String source) {
		// Reluctant match so each tag is removed individually rather than
		// everything between the first '<' and the last '>'.
		return source.replaceAll("<.*?>", "");
	}

	/*
	 * Simple sanity "smoke" test
	 */
	public static void main(String[] args) {
		String source = "Hello  ftp://cathog.cn:1921/stuff.html  stuff https://www.utexas.edu morehttp://stuff.com?moredogca.ind/com";
		Parser.extractLinks(source);
	}
}
