package crawler;

import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Downloads web page content starting with a starting url. If the spider
 * encounters links in the content, it downloads those as well.
 * 
 * @author Stuart Hansen
 * @version April 14, 2011
 * 
 */
public class ConcurrentSpiderComplete extends Thread {

	public static final boolean DEBUG = false;
	public static final boolean VERBOSE = true;

	// NOTE(review): database credentials are hard-coded in source. Move them
	// to a properties file or environment variable before publishing/deploying.
	private static final String DATABASE_URL = "jdbc:mysql://localhost/groupwork_db";
	private static final String DATABASE_USER = "groupwork";
	private static final String DATABASE_PW = "smiles";

	// The URLs waiting to be explored. A LinkedBlockingQueue gives an atomic
	// poll(), which removes the check-then-act race the previous
	// CopyOnWriteArrayList code had between isEmpty() and remove(0)
	// (that race could throw IndexOutOfBoundsException under contention).
	private static Queue<String> work = new LinkedBlockingQueue<String>();

	// The list of URLs that have been processed successfully
	private static List<String> finished = new CopyOnWriteArrayList<String>();

	// The list of URLs that we couldn't follow for one reason or another
	private static List<String> nonRetrievableURLs = new CopyOnWriteArrayList<String>();

	// Per-URL statistics (reference count / link count) for every URL seen
	private static ConcurrentHashMap<String, URLData> urlDataMap = new ConcurrentHashMap<String, URLData>();

	// {url, html source} pairs for every page we attempted to download
	private static List<String[]> sourcePages = new CopyOnWriteArrayList<String[]>();

	// The list containing PageQnA to be sent to the database
	private static List<PageQnA> pageQuestionAnswer = new CopyOnWriteArrayList<PageQnA>();

	// The number of URLs this spider should process
	private int maxUrls = 5;

	/**
	 * Accessor for the shared list of downloaded pages.
	 *
	 * @return the list of {url, html} pairs collected by all spiders
	 */
	public List<String[]> getSourcePages() {
		return sourcePages;
	}

	/**
	 * Creates a new spider that will crawl at most maxUrls.
	 *
	 * @param maxUrls the maximum number of URLs this spider processes
	 */
	public ConcurrentSpiderComplete(int maxUrls) {
		this.maxUrls = maxUrls;
	}

	/**
	 * Start this spider working.
	 */
	@Override
	public void run() {
		try {
			crawl();
		} catch (Exception e) {
			// Report the full exception, not just getMessage() (which may be null).
			System.err.println("Id: " + this.toString() + " " + e);
		}
	}

	/**
	 * Crawls URLs from the shared work queue until this spider has finished
	 * maxUrls pages or the queue is (momentarily) empty.
	 *
	 * @throws Exception if crawling fails unrecoverably
	 */
	public void crawl() throws Exception {
		while (finished.size() < maxUrls) {
			// poll() atomically removes the head or returns null when empty,
			// so concurrent spiders cannot race on the same element.
			String urlStr = work.poll();
			if (urlStr == null) {
				break;
			}
			updateData(urlStr);
			String htmlPage = retrieve(urlStr);
			// Record the attempt even on failure; consumers check for null html.
			sourcePages.add(new String[] { urlStr, htmlPage });
			if (htmlPage != null) {
				scrapePage(urlStr, htmlPage);
				finished.add(urlStr);
			} else {
				nonRetrievableURLs.add(urlStr);
			}
		}
	}

	/**
	 * Scrapes the page looking for new URLs.
	 *
	 * @param urlStr
	 *            the base URL for relative references
	 * @param htmlPage
	 *            the page being scraped
	 */
	public void scrapePage(String urlStr, String htmlPage) {
		// Find all the links on the page
		List<String> links = extractLinks(urlStr, htmlPage);
		// updateData(urlStr) ran before this call, so the map entry exists.
		urlDataMap.get(urlStr).setLinkCount(links.size());

		// Walk through the links, queueing unseen ones and updating counts.
		// containsKey()/add() is not atomic, so a link can occasionally be
		// queued twice under contention; crawl() tolerates duplicates.
		for (String link : links) {
			if (!urlDataMap.containsKey(link)) {
				work.add(link);
			}
			updateData(link);
		}
	}

	/**
	 * The regular expression that recognizes anchor tags. Compiled once
	 * (Pattern is thread-safe) with CASE_INSENSITIVE so we can match the
	 * original text instead of lowercasing the page — lowercasing corrupted
	 * the extracted URLs, whose paths are case-sensitive on Wikipedia.
	 */
	private static final Pattern MATCH_HREF = Pattern.compile(
			"<a.*?href\\s*=\\s*['\"](.*?)['\"].*?>(.*?)</a>",
			Pattern.CASE_INSENSITIVE);

	/**
	 * Returns the web links contained in the html content. Absolute links to
	 * other hosts are skipped so the crawl stays on wikipedia.org.
	 *
	 * @param baseUrl the URL of the page the html came from
	 * @param html the page content to scan
	 * @return the list of absolute URLs found on the page
	 */
	public List<String> extractLinks(String baseUrl, String html) {
		if (!baseUrl.endsWith("/")) {
			baseUrl += "/";
		}
		List<String> links = new ArrayList<String>();
		Matcher m = MATCH_HREF.matcher(html);
		while (m.find()) {
			String url = m.group(1);
			if (url.startsWith("http://") || url.startsWith("https://")) {
				// Skip absolute links: we want to stay on wikipedia.org.
				continue;
			}
			if (url.startsWith("/")) {
				// Root-relative link: prepend the host but KEEP the path.
				// (Previously the whole URL was replaced by the bare
				// homepage, losing every root-relative target.)
				url = "https://en.wikipedia.org" + url;
			} else {
				url = baseUrl + url;
			}
			links.add(url);
		}
		return links;
	}

	/**
	 * Makes sure this URL is in the map and then updates its reference count.
	 * Uses putIfAbsent so concurrent spiders cannot clobber each other's
	 * URLData entry (the old get/put sequence lost entries under contention).
	 *
	 * @param urlStr the URL whose statistics should be updated
	 */
	public void updateData(String urlStr) {
		URLData urlDatum = urlDataMap.get(urlStr);
		if (urlDatum == null) {
			URLData fresh = new URLData(urlStr);
			URLData prior = urlDataMap.putIfAbsent(urlStr, fresh);
			urlDatum = (prior != null) ? prior : fresh;
		}
		urlDatum.incrementReferenceCount();
	}

	/**
	 * Returns the contents of a url as a string.
	 *
	 * @param urlStr the URL to download
	 * @return String contents or null in case of an error
	 */
	public String retrieve(String urlStr) {
		InputStream in = null;
		try {
			URL url = new URL(urlStr);
			in = url.openConnection().getInputStream();
			// StringBuilder avoids the O(n^2) cost of += on large pages.
			// NOTE(review): byte-to-char cast decodes as ISO-8859-1-ish,
			// matching the original behavior; non-ASCII text is mangled.
			StringBuilder result = new StringBuilder();
			int c;
			while ((c = in.read()) >= 0) {
				if (DEBUG) {
					System.out.print((char) c);
				}
				result.append((char) c);
			}
			return result.toString();
		} catch (IOException e) {
			System.err.println(this.getName() + " Fetch of " + urlStr
					+ " failed");
			return null;
		} finally {
			// Always release the connection (the old code leaked it).
			if (in != null) {
				try {
					in.close();
				} catch (IOException ignored) {
					// best effort; nothing useful to do on close failure
				}
			}
		}
	}

	/**
	 * Report the results of the crawling.
	 */
	public static void reportResults() {

		// Report the summary results
		System.out
				.println("\n************************************************************");
		System.out.println("Total URLs found: " + urlDataMap.size());
		System.out.println("Remaining Work: " + work.size());
		System.out.println("Finished URLs: " + finished.size());
		System.out
				.println("Non Retrievable URLs: " + nonRetrievableURLs.size());
	}

	/**
	 * This small nested class keeps track of the data for each URL. It is
	 * static because it never touches the enclosing spider instance, and its
	 * counters are thread-safe because several spiders update them at once.
	 */
	private static class URLData {

		// the number of times this page is referenced by others;
		// AtomicInteger because ++ on a plain int is not atomic
		private final AtomicInteger referenceCount = new AtomicInteger(0);
		// the number of links contained on this page; volatile so the value
		// written by the scraping thread is visible to reporting threads
		private volatile int linkCount;
		// the URL for this page
		private final String urlString;

		public URLData(String url) {
			this.linkCount = 0;
			this.urlString = url;
		}

		public void incrementReferenceCount() {
			referenceCount.incrementAndGet();
		}

		public int getReferenceCount() {
			return referenceCount.get();
		}

		public String getUrlString() {
			return urlString;
		}

		public int getLinkCount() {
			return linkCount;
		}

		public void setLinkCount(int linkCount) {
			this.linkCount = linkCount;
		}
	}

	/**
	 * Start the program running: seed the work queue, run the spiders,
	 * generate questions from the scraped pages, and store them.
	 *
	 * @param args optional list of seed URLs
	 * @throws Exception if a spider thread is interrupted
	 */
	public static void main(String[] args) throws Exception {
		final int THREAD_COUNT = 4;
		if (VERBOSE) {
			System.out.println("Adding pages to the work list.");
		}
		if (DEBUG) {
			work.add("https://en.wikipedia.org/wiki/Alan_Turing");
		} else {
			if (args.length > 0) {
				for (int i = 0; i < args.length; i++) {
					work.add(args[i]);
				}
			} else {
				work.add("https://en.wikipedia.org/wiki/Alan_Turing");
			}
		}
		if (VERBOSE) {
			System.out.println("Starting page crawlers.");
		}
		ConcurrentSpiderComplete[] spider = new ConcurrentSpiderComplete[THREAD_COUNT];
		for (int i = 0; i < THREAD_COUNT; i++) {
			spider[i] = new ConcurrentSpiderComplete(10);
			spider[i].start();
		}
		for (int i = 0; i < THREAD_COUNT; i++) {
			spider[i].join();
		}
		if (VERBOSE) {
			System.out.println("Page crawlers finished.");
		}
		// ***********************************************************
		if (VERBOSE) {
			System.out
					.println("Generating PageQnA using WikiPageScraper and QuestionGenerator.");
		}
		// Single-threaded from here on; remove(0) on the list is safe.
		while (!sourcePages.isEmpty()) {
			String source[] = sourcePages.remove(0);
			if (source[0] != null && source[1] != null) {
				WikiPageScraper scraper = new WikiPageScraper(source[1]);
				QuestionGenerator generator = new QuestionGenerator(source[0],
						scraper.getTitle(), scraper.getTableInfo(),
						scraper.getSentences());
				pageQuestionAnswer.add(generator.getPageQnA());
			}
		}
		if (VERBOSE) {
			System.out
					.println("Submitting pageQuestionAnswer to QuestionDBConnector");
		}
		try {
			QuestionDBConnector connector = new QuestionDBConnector();
			connector.initialize(DATABASE_URL, DATABASE_USER, DATABASE_PW);
			if (DEBUG) {
				// Smoke-test path: push a single synthetic question instead
				// of the real crawl results.
				List<PageQnA> lpqa = new CopyOnWriteArrayList<PageQnA>();
				PageQnA pqa = new PageQnA("http://test.url/", "Test Title");
				List<String> q = new CopyOnWriteArrayList<String>();
				List<Integer> a = new CopyOnWriteArrayList<Integer>();
				q.add("This is a test question about the number ___.");
				a.add(5);
				pqa.addQnA(q, a);
				lpqa.add(pqa);
				connector.insertPageQnA(lpqa);
			} else {
				connector.insertPageQnA(pageQuestionAnswer);
			}
			connector.close();
		} catch (Exception e) {
			System.out.println(e);
		}

		reportResults();
	}
}