package hk.polyu;

import java.io.*;
import java.net.*;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.AbstractMap.*;
import java.util.regex.*;

/**
 * A simple breadth-first crawler for Wikipedia category pages.
 *
 * <p>Starting from an initial category URL it fetches each page, saves the
 * HTML to a local folder, records the category in a {@code Tree}, and
 * enqueues every sub-category link found on the page until the configured
 * depth is exceeded. Crawling runs on its own thread (see {@link #crawl()}).
 */
public class WikiCrawler implements Runnable {
	
	/**
	 * Wikipedia web site URL. constant.
	 */
	public static final String mHost = "http://en.wikipedia.org";
	
	/**
	 * Matches one sub-category anchor tag. Compiled once (reused for every
	 * page). Reluctant quantifiers ({@code .+?}) keep a single match from
	 * spanning several anchors on one line — the previous greedy form could
	 * swallow everything between the first {@code <a} and the last
	 * {@code </a>}, dropping all but the first link.
	 */
	private static final Pattern CATEGORY_LINK_PATTERN =
			Pattern.compile("<a.+?CategoryTreeLabel.+?href=\"/wiki/Category:.+?</a>");
	
	/**
	 * Extracts the {@code /wiki/Category:...} path from an anchor tag.
	 */
	private static final Pattern CATEGORY_URL_PATTERN =
			Pattern.compile("/wiki/Category:[^\"]+");
	
	/**
	 * The initial URL to be fetched by the crawler.
	 */
	public String 	mInitURL;
	
	/**
	 * The location of a local folder, where the crawled item will be stored.
	 */
	public String 	mSavePath;
	
	/**
	 * The depth of the crawling level. Will stop after reaching the depth.
	 */
	public int		mDepth = 2;
	
	/**
	 * The data structure storing the fetched category.
	 * Each node is a (URL, depth) pair; the root is the initial URL.
	 */
	Tree<SimpleEntry<String, Integer>>	mCategoryTree;
	
	/**
	 * The queue storing the waiting list of (URL, depth) pairs.
	 */
	Queue<SimpleEntry<String, Integer>>	mWaitingQueue;
	

	/**
	 * Constructor with initial URL and local save path
	 * @param mURL	The initial URL
	 * @param path	The local save path.
	 */
	public WikiCrawler(String mURL, String path) {
		this.mInitURL = mURL;
		this.mSavePath = path;
		
		// Normalize the path so file names can be appended directly.
		if (! this.mSavePath.endsWith(File.separator))
			this.mSavePath += File.separator;
		
		this.mWaitingQueue = new LinkedList<SimpleEntry<String, Integer>>();
		
		// Seed the queue with the initial URL at depth 0.
		this.mWaitingQueue.add(new SimpleEntry<String, Integer>(mURL, 0));
	}
	
	/**
	 * Read the content of a single web page.
	 * @param url		The URL of the web page that will be fetched.
	 * @return		The content in the format of string; empty on I/O failure.
	 */
	private String pageFetcher(String url) {
		
		// StringBuilder avoids the O(n^2) cost of String += in a loop.
		StringBuilder content = new StringBuilder();
		
		try {
			URL u = new URL(url);
			// Wikipedia serves UTF-8; don't rely on the platform charset.
			BufferedReader reader = new BufferedReader(
					new InputStreamReader(u.openStream(), StandardCharsets.UTF_8));
			try {
				String line;
				while ((line = reader.readLine()) != null) {
					content.append(line).append("\r\n");
				}
			} finally {
				// Always release the connection (the old code leaked it).
				reader.close();
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
		// hold on before return, to avoid hammering the server.
		this.holdon();
		return content.toString();
	}
	
	/***
	 * Save the crawled file to local disk. The path is determined by mSavePath
	 * @param fname		Name of the file.
	 * @param content	Fetched content.
	 */
	private void saveFile(String fname, String content) {
		
		String fullPath = this.mSavePath + fname;
		
		try {
			// Write UTF-8 explicitly; close in finally so the handle is not
			// leaked when write() throws (old code only closed on success).
			BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
					new FileOutputStream(fullPath), StandardCharsets.UTF_8));
			try {
				out.write(content);
			} finally {
				out.close();
			}
		} catch (IOException e) {
			// Best-effort behaviour is kept, but report which file failed.
			System.out.println("Failed to save " + fullPath + ": " + e);
		}

	}
	
	/**
	 * Sleep a few seconds to avoid being blocked by Server....
	 */
	private void holdon() {
		try {
			Thread.sleep(2000);
		} catch (InterruptedException e) {
			// Restore the interrupt flag so the owning thread can observe it
			// (swallowing it would hide the interruption from callers).
			Thread.currentThread().interrupt();
		}
	}
	
	/**
	 * Use regular expression to extract the URL and append it to the queue.
	 * Tags that contain no category URL are silently ignored.
	 * @param item	String containing the <a></a> of the sub URL
	 * @param level Number of levels.
	 */
	public void appendQueue(String item, int level) {
		// Find URL
		String url = WikiCrawler.urlFromTag(item);
		
		// urlFromTag() returns null for malformed tags; enqueueing null
		// would NPE later in nameFromUrl().
		if (url == null)
			return;
		
		this.mWaitingQueue.add(new SimpleEntry<String, Integer>(url, level));
	}	
	
	/**
	 * Start crawling on a new background thread.
	 */
	public void crawl() {
		new Thread(this).start();
	} // end crawl.
	
	
	/**
	 * @return The tree of crawled categories (null until run() starts).
	 */
	public Tree<SimpleEntry<String, Integer>> getTree() {
		return this.mCategoryTree;
	}
	
	/**
	 * Extract the category name from a category URL, i.e. the text after
	 * the last ':' (e.g. ".../Category:Dogs" -> "Dogs").
	 * @param url	A category URL.
	 * @return		The category name.
	 */
	public static String nameFromUrl(String url) {
		return url.substring(url.lastIndexOf(':') + 1);
	}
	
	/**
	 * Extract the absolute category URL out of an anchor tag.
	 * @param tag	HTML containing a /wiki/Category:... href.
	 * @return		The absolute URL, or null when no category URL is found.
	 */
	public static String urlFromTag(String tag) {
		Matcher m = CATEGORY_URL_PATTERN.matcher(tag);
		if (m.find()) {
			return WikiCrawler.mHost + m.group();
		}
		return null;
	}
	
	/**
	 * BFS crawl loop: poll a (URL, depth) pair, fetch and save the page,
	 * notify listeners, and (while within mDepth) enqueue its sub-categories.
	 */
	@Override
	public void run() {
		while (! this.mWaitingQueue.isEmpty()) {
			// get an item from head.
			SimpleEntry<String, Integer> e = this.mWaitingQueue.poll();
			String url = e.getKey();
			int level = e.getValue();
			
			// The first dequeued item becomes the root of the category tree.
			if (level == 0) {
				this.mCategoryTree = new Tree<SimpleEntry<String, Integer>>(e);
			}
			
			// Get the file name from URL.
			String catName = nameFromUrl(url);
			
			// Fetch and save to disk.
			String s = this.pageFetcher(url);
			this.saveFile(catName + ".html", s);
			// Fire Event
			for (OnCrawlListener listener : listeners) {
			    listener.onPageCrawled(e);
			}
			
			// Pages beyond the configured depth are saved but not expanded.
			if (level > this.mDepth) {
				continue;
			}
			// Find all sub categories using the precompiled regex.
			Matcher m = CATEGORY_LINK_PATTERN.matcher(s);
			while (m.find()) {
				String group = m.group().trim();
				String subUrl = WikiCrawler.urlFromTag(group);
				// Skip malformed tags with no extractable category URL.
				if (subUrl == null)
					continue;
				this.appendQueue(group, level+1);
				// Put to retrieved tree.
				SimpleEntry<String, Integer> entry =
						new SimpleEntry<String, Integer>(subUrl, level+1);
				this.mCategoryTree.addLeaf(e, entry);
			} 
		} // end while
		
		// Fire Event
		for (OnCrawlListener listener : listeners) {
		    listener.onCrawlComplete();
		}
	}
	
	
	// Event Handling: callbacks fired on the crawler thread.
	public interface OnCrawlListener {
		/** Called after each page is fetched and saved. */
		public void onPageCrawled(SimpleEntry<String, Integer> item);
		
		/** Called once, after the waiting queue drains. */
		public void onCrawlComplete();
	}
	
	ArrayList<OnCrawlListener> listeners = new ArrayList<OnCrawlListener>();
	
	/**
	 * Register a listener for crawl events (name kept for compatibility;
	 * it adds rather than replaces).
	 * @param listener	The callback to register.
	 */
	public void setOnCrawlListener(OnCrawlListener listener) {
		this.listeners.add(listener);
	}



	
}
