package model;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * Parses web pages and extracts URLs from each page.
 * 
 * @author Thomas Nunn
 * @author David Everitt
 * @author Dexter Hu
 * @version TCSS 422 Operating Systems Winter 2014
 */
public class PageParser implements Runnable {

	/**
	 * Matches absolute HTTP or HTTPS URLs (case-insensitive scheme).
	 * Compiled once and shared: Pattern compilation is expensive and the
	 * expression never changes. Replaces the original per-call pattern
	 * {@code "(?i)http://(.*?)."}, whose unescaped trailing '.' matched any
	 * character (not a literal dot) and which rejected https links.
	 */
	private static final Pattern URL_PATTERN = Pattern.compile("(?i)https?://.+");

	/**
	 * The List of unique URLs to parse.
	 */
	private volatile List<String> myURLsToParse;

	/**
	 * The List of time spent parsing each page, in nanoseconds.
	 */
	private volatile List<Long> myTimePerPage;

	/**
	 * A Map containing URLs as keys and the page's word list as values.
	 */
	private Map<String, ArrayList<String>> myHTML;

	/**
	 * The PageRetriever that supplies downloaded jsoup Documents.
	 */
	private PageRetriever retriever;

	/**
	 * Represents the state of the web crawler, running or stopped.
	 */
	private volatile boolean isRunning = true;

	/**
	 * The completion status of this parser.
	 */
	private volatile boolean isComplete = false;

	/**
	 * Constructs a PageParser with empty URL, timing, and page collections.
	 */
	public PageParser() {
		super();
		myURLsToParse = new ArrayList<>();
		myTimePerPage = new ArrayList<>();
		myHTML = new ConcurrentHashMap<>();
	}

	/**
	 * Parses a jsoup Document (web page) into words and embedded URLs.
	 * Newly discovered absolute HTTP(S) links are queued for parsing, the
	 * page's words are stored in the myHTML map, and the time spent is
	 * recorded in myTimePerPage. Pages whose URL ends in "txt/" are skipped
	 * entirely (no links, no words, no timing) to preserve the original
	 * crawler behavior.
	 * 
	 * @param theDoc The Document to parse.
	 */
	public synchronized void parsePage(Document theDoc) {
		String docURL = theDoc.location();

		// Plain-text pages: don't extract URL links or record anything.
		if (docURL.endsWith("txt/")) {
			return;
		}

		long startTime = System.nanoTime();

		// Extract the URL links from the page; queue each absolute
		// HTTP(S) link exactly once.
		Elements links = theDoc.select("a[href]");
		for (Element link : links) {
			String linkURL = link.attr("abs:href");

			Matcher m = URL_PATTERN.matcher(linkURL);
			if (m.matches() && !myURLsToParse.contains(linkURL)) {
				myURLsToParse.add(linkURL);
			}
		}

		addToMap(docURL, theDoc.html());

		myTimePerPage.add(System.nanoTime() - startTime);
	}

	/**
	 * Splits a page's HTML into lower-case words and stores them in the
	 * myHTML map under the given URL. Empty tokens produced by consecutive
	 * delimiters are discarded.
	 * 
	 * @param URL The URL of the web page
	 * @param html The String holding the URL's content
	 */
	public void addToMap(String URL, String html) {
		ArrayList<String> list = new ArrayList<>();

		// Split on punctuation and whitespace to isolate individual words.
		String[] arr = html.split("\\.|\\!|\\,|\\\"|\\?|\\;|\\:|\\*|\\[|\\]|\\\\|\\@|\\<|\\>|\\s");

		// Load each non-empty word into the ArrayList.
		// FIX: the original test (s.equals("") || !s.equals(" ")) was true
		// for every token — including empty ones — so empty strings were
		// stored; the intent was to filter them out.
		for (String s : arr) {
			if (!s.isEmpty()) {
				list.add(s.toLowerCase());
			}
		}

		myHTML.put(URL, list);
	}

	/**
	 * Adds a URL to the myURLsToParse field if it is not already queued.
	 * Synchronized so the contains/add check-then-act cannot race with
	 * parsePage(), which mutates the same list under this object's lock.
	 * 
	 * @param theURL The URL to add.
	 */
	public synchronized void addURL(String theURL) {
		if (!myURLsToParse.contains(theURL)) {
			myURLsToParse.add(theURL);
		}
	}

	/**
	 * Resets all collections and flags so the parser can run again, and
	 * installs a fresh PageRetriever.
	 */
	public void clear() {
		myURLsToParse.clear();
		myHTML.clear();
		myTimePerPage.clear();
		isRunning = true;
		isComplete = false;
		retriever = new PageRetriever();
	}

	/**
	 * @return a copy of the URL List.
	 */
	public synchronized List<String> getURLs() {
		return new ArrayList<String>(myURLsToParse);
	}

	/**
	 * @return The number of URLs parsed.
	 */
	public synchronized int getNumberParsed() {
		return myHTML.size();
	}

	/**
	 * @return the average number of discovered URLs per parsed page
	 *         (integer division; 0 if no pages have been parsed).
	 */
	public synchronized Integer getAvgURLs() {
		Integer result = 0;
		if (myHTML.size() > 0) {
			result = myURLsToParse.size() / myHTML.size();
		}
		return result;
	}

	/**
	 * @return the average time in nanoseconds it takes to parse a page,
	 *         or 0 if no pages have been timed.
	 */
	public synchronized long getAvgTimePerPage() {
		long result = 0;
		int size = myTimePerPage.size();
		if (size > 0) {
			for (long time : myTimePerPage) {
				result += time;
			}
			result = result / size;
		}
		return result;
	}

	/**
	 * @return a copy of the myHTML Map. FIX: the original returned the
	 *         internal map itself despite documenting a copy, exposing
	 *         mutable internal state to callers.
	 */
	public synchronized Map<String, ArrayList<String>> getHTMLMap() {
		return new ConcurrentHashMap<>(myHTML);
	}

	/**
	 * @return True if this parser is finished running, false otherwise.
	 */
	public synchronized boolean isFinished() {
		return isComplete;
	}

	/**
	 * Stops the run() method and marks the parser complete.
	 */
	public void stopCrawler() {
		isRunning = false;
		isComplete = true;
	}

	/**
	 * @return True if the run() method is still active.
	 */
	public boolean isRunning() {
		return isRunning;
	}

	/**
	 * Sets this Object's PageRetriever.
	 * 
	 * @param theRetriever The retriever whose Documents will be parsed.
	 */
	public void setRetriever(PageRetriever theRetriever) {
		retriever = theRetriever;
	}

	/**
	 * Repeatedly checks the Retriever's jsoup Document List and parses
	 * Documents until the isRunning field is false.
	 * NOTE(review): this loop busy-waits with no pause and re-parses
	 * documents already seen; consider a BlockingQueue hand-off between
	 * retriever and parser. Left as-is to preserve crawler timing behavior.
	 */
	@Override
	public void run() {

		while (isRunning) {

			if (retriever.getNumberVisited() > 0) {
				for (Map.Entry<String, Document> entry : retriever.getDocList().entrySet()) {
					parsePage(entry.getValue());

					if (!isRunning) {
						break;
					}
				}
			}
		}
	}
}
