import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.Locale;

import websphinx.Link;
import websphinx.Page;


/**
 * Crawler that harvests page URLs, stopping after a fixed number of pages
 * have been visited. Each visited page's URL is wrapped in an
 * {@link ExtendedLink} and stored keyed by the hash code of its URL string.
 */
public class UrlHarvestingCrawler extends MyCrawler {

	private static final long serialVersionUID = -5212322259234387151L;

	/** Stop crawling once this many pages have been visited. */
	private static final int MAX_PAGES = 100;

	/** Lower-case file extensions that never point to an HTML page. */
	private static final String[] NON_HTML_EXTENSIONS =
			{ ".png", ".jpg", ".pdf", ".ico", ".css", ".gif", ".js" };

	/** Harvested links, keyed by the hash code of their URL string. */
	private HashMap<Integer, ExtendedLink> webLinks = new HashMap<Integer, ExtendedLink>();

	/**
	 * Creates a crawler rooted at the given seed URL.
	 *
	 * @param url the seed URL to start crawling from
	 * @throws IOException if the underlying crawler cannot be initialised
	 */
	public UrlHarvestingCrawler(String url) throws IOException {
		super(url);
	}

	/**
	 * Records the visited page's URL, stopping the crawl once
	 * {@link #MAX_PAGES} pages have been visited.
	 *
	 * @param page the page just fetched by the crawler
	 */
	@Override
	protected void doVisit(Page page) {
		if (this.getPagesVisited() >= MAX_PAGES) {
			logFile.info("FinishedCrawling. Collected " + this.getPagesVisited() + " urls");
			this.stop();
			// Bug fix: previously the page was still recorded after stop().
			return;
		}
		URL url = page.getURL();
		ExtendedLink el;
		try {
			el = new ExtendedLink(new Link(url));
		} catch (MalformedURLException e) {
			logFile.error(e.getMessage());
			// Bug fix: previously a null value was stored in webLinks here.
			return;
		}
		webLinks.put(url.toString().hashCode(), el);
		logFile.info("Saved " + url);
	}

	/**
	 * Decides whether a link should be crawled: skips links whose filename
	 * has a known non-HTML extension, and follows only links whose host
	 * contains the "core" part of the first crawl root's host (e.g.
	 * "example" from "www.example.com").
	 *
	 * @param link the candidate link
	 * @return true if the link should be visited
	 */
	@Override
	public boolean shouldVisit(Link link) {
		String name = link.getFilename();
		if (name != null) {
			// Bug fix: match ".ext" case-insensitively instead of a bare,
			// case-sensitive "ext" suffix (which rejected names like "mypng"
			// and let "X.PNG" through).
			String lowerName = name.toLowerCase(Locale.ROOT);
			for (String ext : NON_HTML_EXTENSIONS) {
				if (lowerName.endsWith(ext)) {
					logFile.info("This link doesn't point to an HTML page");
					return false;
				}
			}
		}

		String rootHost = this.getCrawledRoots()[0].getHost();
		int firstDot = rootHost.indexOf('.');
		int lastDot = rootHost.lastIndexOf('.');
		// Bug fix: hosts with fewer than two dots (e.g. "example.com",
		// "localhost") previously caused a StringIndexOutOfBoundsException;
		// fall back to the whole host in that case.
		String baseHostName = (firstDot > -1 && firstDot < lastDot)
				? rootHost.substring(firstDot + 1, lastDot)
				: rootHost;
		// NOTE(review): this is a substring containment check, so unrelated
		// hosts that merely contain the core name also match — preserved
		// from the original behaviour.
		return link.getHost().indexOf(baseHostName) > -1;
	}

	/**
	 * @return the links harvested so far, keyed by URL-string hash code
	 */
	public HashMap<Integer, ExtendedLink> getWebLinks() {
		return webLinks;
	}

	/**
	 * Example entry point; the sample invocation is left commented out so the
	 * class can be compiled and used as a library without side effects.
	 */
	public static void main(String[] argv) {
		try {
			// UrlHarvestingCrawler crawler = new UrlHarvestingCrawler("http://www.example.com");
			// crawler.run();
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

}
