/**
 * 
 */
package webCrawler;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.htmlparser.Parser;
import org.htmlparser.filters.NodeClassFilter;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

/**
 * @author mingfan
 * 
 */
public class WebCrawler {

	/**
	 * All URLs that have been (or are being) processed. Backed by a
	 * synchronized set so concurrent worker threads can share it; the atomic
	 * result of {@link Set#add} is used as the "first visitor wins" test.
	 */
	private final Set<String> visitedURLs = Collections
			.synchronizedSet(new HashSet<String>());

	/**
	 * Thread pool that runs one task per URL to crawl.
	 */
	private final ExecutorService threadPool;

	/**
	 * Worker threads per available processor. The workload is I/O-bound
	 * (network fetches), hence far more threads than cores.
	 */
	private static final int THREAD_POOL_SIZE_MULTIPLIER = 10;

	/**
	 * URL that the web crawler will start with.
	 */
	private final String targetUrlName;

	/**
	 * Crawl start time in nanoseconds, used for the 1500-link timing report.
	 */
	private final long startTime = System.nanoTime();

	// private methods ---------------------------------------------------

	/**
	 * Atomically marks a URL as visited.
	 *
	 * @param urlName URL to mark
	 * @return {@code true} if this call was the first to mark it (the caller
	 *         now owns processing of this URL), {@code false} if it was
	 *         already visited
	 */
	private boolean markVisited(final String urlName) {
		// Set.add() on the synchronized set is atomic, replacing the racy
		// contains()-then-add() check-then-act of the earlier design, which
		// could let two threads process the same URL.
		return visitedURLs.add(urlName);
	}

	/**
	 * Processes one URL: fetches its page, extracts all links on it, and
	 * submits each unvisited link to the thread pool for the same treatment.
	 *
	 * @param urlName URL to fetch and scan for links
	 */
	private void processOneURL(final String urlName) {
		// First visitor wins; every other thread returns immediately.
		if (!markVisited(urlName)) {
			return;
		}

		try {
			// Fetch the page and extract every <a> link tag.
			URL urlLink = new URL(urlName);
			Parser parser = new Parser(urlLink.openConnection());
			NodeList nodeLists = parser
					.extractAllNodesThatMatch(new NodeClassFilter(LinkTag.class));

			List<String> urlsNeedProcess = new ArrayList<>();
			for (int i = 0; i < nodeLists.size(); i++) {
				LinkTag extractedLinkTag = (LinkTag) nodeLists.elementAt(i);
				String extractedLink = extractedLinkTag.getLink();

				// Skip empty hrefs and links already claimed elsewhere; the
				// definitive duplicate check is markVisited() in the worker.
				if (!extractedLink.isEmpty() && !visitedURLs.contains(extractedLink)) {
					urlsNeedProcess.add(extractedLink);
				}
			}

			for (String oneURL : urlsNeedProcess) {
				startProcessOneURL(oneURL);
			}

			// Report elapsed time once 1500 links have been visited.
			if (visitedURLs.size() == 1500) {
				System.out.println("Time used to process 1500 links: "
						+ (System.nanoTime() - startTime) / 1.0e9 + " seconds");
			}
		} catch (ParserException | IOException e) {
			// Best-effort crawl: a malformed or unreachable URL is skipped,
			// but the failure is recorded instead of silently swallowed.
			System.err.println("Failed to process " + urlName + ": " + e);
		}
	}

	/**
	 * Submits one URL to the thread pool for asynchronous processing.
	 *
	 * @param urlName URL to process
	 */
	private void startProcessOneURL(final String urlName) {
		threadPool.execute(new Runnable() {

			@Override
			public void run() {
				processOneURL(urlName);
			}

		});
	}

	// public methods----------------------------------------------------

	/**
	 * Creates a crawler rooted at the given URL. The pool size scales with
	 * the number of available processors.
	 *
	 * @param targetUrlName URL the crawl will start from
	 */
	public WebCrawler(final String targetUrlName) {
		this.targetUrlName = targetUrlName;
		threadPool = Executors.newFixedThreadPool(Runtime.getRuntime()
				.availableProcessors() * THREAD_POOL_SIZE_MULTIPLIER);
	}

	/**
	 * Starts the crawl from the target URL.
	 */
	public void startWebCrawler() {
		startProcessOneURL(targetUrlName);
	}

	/**
	 * Entry point, currently used for manual testing.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		new WebCrawler("http://www.montrealgazette.com").startWebCrawler();
	}

}
