/**
 * Fetches a sample of web documents (HTML or PDF), caches the set of terms
 * found on each page, and computes per-term document frequencies across the
 * sample and its child samples.
 */
package coms6111.astbbw.webdb;

import coms6111.astbbw.ResultSet;
import java.util.*;
import java.util.Map.Entry;
import java.util.regex.*;
import java.io.*;
import java.net.*;
import org.pdfbox.pdmodel.*;
import org.pdfbox.util.PDFTextStripper;

import org.apache.log4j.Logger;
/**
 * @author ben
 *
 */
public class DocumentSample {
	/** Maximum number of pages accepted from any single ResultSet. */
	private static final int FETCH_MAX = 4;
	/** Pause between successive page fetches, to be polite to remote servers. */
	protected static int WAIT_MILLISECS = 1000;
	/**
	 * Matches runs of lower-case alphabetic characters (terms). Compiled once
	 * instead of per call to getPDFTerms.
	 */
	private static final Pattern TERM_PATTERN = Pattern.compile("[a-z]+");
	/**
	 * Map URL to clickURL (for Yahoo)
	 */
	protected Hashtable<String,String> pageLinks = new Hashtable<String,String>();
	/**
	 * Map URL to term-set for the corresponding page.
	 */
	protected Hashtable<String,Set<String>> pageTerms = new Hashtable<String,Set<String>>();
	/** Set to true once updateAll() has fetched terms for every page here. */
	protected boolean dataFetched = false;

	/** Parent sample, or null for a root sample. */
	protected DocumentSample parent = null;
	/** Child samples; updated automatically by the child constructor. */
	protected ArrayList<DocumentSample> children = new ArrayList<DocumentSample>();
	private Logger l = Logger.getLogger(this.getClass());

	/**
	 * Create a child sample and register it with its parent.
	 * @param p the DocumentSample for the parent node; must be non-null
	 * (use the default constructor for a root sample).
	 */
	public DocumentSample(DocumentSample p) {
		this.parent = p;
		this.parent.children.add(this);
	}

	/**
	 * Default constructor: creates a root sample with no parent.
	 */
	public DocumentSample() {
		// empty constructor
	}

	/**
	 * Add up to {@code FETCH_MAX} pages to the document sample from this
	 * ResultSet.  Pages whose URL is already known are not re-added.
	 * @param rs the result set whose leading documents are added
	 */
	public void addPages(ResultSet rs) {
		int localMax = Math.min(rs.getNodeLen(), FETCH_MAX);
		l.trace("Adding " + localMax + " new pages");
		for (int i = 0; i < localMax; i++) {
			String keyURL = rs.doc[i].URL;
			if (!pageLinks.containsKey(keyURL)) {
				pageLinks.put(keyURL, rs.doc[i].clickURL);
			}
		}
	}

	/**
	 * @param pageURL the page URL to look up
	 * @return true if this sample or an ancestor has stored terms for the given URL
	 */
	protected boolean hasTermsFor(String pageURL) {
		boolean answer = false;
		if (null != parent) answer = parent.hasTermsFor(pageURL);
		if (!answer) answer = pageTerms.containsKey(pageURL);
		return answer;
	}

	/**
	 * Fetch the given page, either as HTML or PDF, parse out its terms, and add
	 * the list of terms to the cache of page terms.  If the fetch yields no
	 * terms, a warning is logged and nothing is cached.
	 * @param pageURL the address of the page to fetch.
	 * @return true if a fetch was attempted, false if the page was already in
	 * the cache (or has no known clickURL).
	 */
	protected boolean updatePage(String pageURL) {
		l.debug("fetching web page " + pageURL);
		if (this.hasTermsFor(pageURL)) {
			l.trace("terms already found: skipping");
			return false;
		}
		String clickURL = this.pageLinks.get(pageURL);
		if (null == clickURL) {
			// Defensive: pageURL was never registered via addPages(), so there
			// is nothing to fetch; the original code would have passed null on.
			l.warn("No clickURL known for " + pageURL + ": skipping");
			return false;
		}
		Set<String> terms;
		if (pageURL.endsWith(".pdf")) {
			l.trace("fetching as PDF");
			terms = getPDFTerms(clickURL);
		} else {
			l.trace("fetching using lynx --dump");
			terms = coms6111.borrowed.getWordsLynx.runLynx(clickURL);
		}
		if (null != terms) {
			l.trace(terms.size() + " terms found: adding to hash");
			pageTerms.put(pageURL, terms);
		} else {
			l.warn("Found no terms for " + clickURL);
		}
		return true;
	}

	/**
	 * Fetch the page at a given URL, parse it as a PDF, and extract the text
	 * from it as a list of terms (groups of alphabetic characters).
	 * @param clickURL the URL to fetch
	 * @return a set of lower-case strings, each of which appears at least once
	 * in the document; empty on fetch or parse failure.
	 */
	public Set<String> getPDFTerms(String clickURL) {
		Set<String> terms = new TreeSet<String>();
		InputStream is = null;
		PDDocument pdf = null;
		try {
			URL pageConnUrl = new URL(clickURL);
			is = pageConnUrl.openStream();
			pdf = PDDocument.load(is);
			PDFTextStripper strip = new PDFTextStripper();
			String txt = strip.getText(pdf).toLowerCase();
			Matcher m = TERM_PATTERN.matcher(txt);
			while (m.find()) {
				terms.add(m.group());
			}
		} catch (MalformedURLException e) {
			System.err.println("ClickURL '" + clickURL + "' is malformed");
		} catch (IOException e) {
			l.warn("Problem fetching or parsing " + clickURL, e);
			System.err.println("IOException fetching " + clickURL);
		} finally {
			// Close both the document and the underlying stream; the original
			// leaked the InputStream.
			if (null != pdf) {
				try {
					pdf.close();
				} catch (IOException e) {
					l.warn("Exception closing PDF document.");
				}
			}
			if (null != is) {
				try {
					is.close();
				} catch (IOException e) {
					l.warn("Exception closing PDF input stream.");
				}
			}
		}
		return terms;
	}

	/**
	 * Get the term set for a given URL, searching recursively through parent samples.
	 * @param pageURL The page URL to fetch.
	 * @return the set of strings, fetched either from the local store or from the parent.  If terms have
	 * not been updated or this page is not part of this sample, returns null.
	 */
	protected Set<String> getTermsFor(String pageURL) {
		if (pageTerms.containsKey(pageURL)) {
			l.trace("returning terms for " + pageURL);
			return pageTerms.get(pageURL);
		}
		// Bug fix: the original dereferenced parent unconditionally, throwing
		// NullPointerException on any root sample.
		if (null != parent && parent.hasTermsFor(pageURL)) {
			l.trace("retrieving terms for " + pageURL + " from parent");
			return parent.getTermsFor(pageURL);
		}
		return null;
	}

	/**
	 * Fetch the terms for all documents in this sample, then recurse into the
	 * child samples.  A parent must be updated before its children.
	 * @throws InterruptedException in a somewhat far-fetched case (if
	 * the thread is interrupted while sleeping between fetches).
	 * @throws RuntimeException if called on a child whose parent has not been updated.
	 */
	public void updateAll() throws InterruptedException {
		l.debug("fetching all terms for " + this);
		if (null != parent && !parent.dataFetched) {
			throw new RuntimeException("Update on child attempted before parent");
		}
		for (String thisPage : pageLinks.keySet()) {
			// Only sleep when a network fetch actually happened.
			if (this.updatePage(thisPage)) {
				Thread.sleep(WAIT_MILLISECS);
			}
		}
		this.dataFetched = true;
		for (DocumentSample child : children) {
			child.updateAll();
		}
	}

	/**
	 * Get the combined document frequency hash for this node and any children.
	 * @return a TreeMap containing every term in the document sample, and the number of documents it appears in.
	 */
	public TreeMap<String,Integer> getDocumentFrequencies() {
		l.debug("Producing treemap");
		TreeSet<String> visited = new TreeSet<String>();
		TreeMap<String, Integer> df = new TreeMap<String,Integer>();
		getDocumentFrequencies(visited, df);
		return df;
	}

	/**
	 * Retrieve the breakdown of file extensions at this node.  No attempt is made to merge three
	 * and four-letter equivalents (e.g. "htm" and "html"), since this is purely for general informational
	 * purposes.  The key "other" is used for files with no extension, and the key "dynamic" is used for
	 * URLs that contain a query string.
	 * @return A hash where the keys are file extensions and the values are the number of times that extension appears.
	 */
	public Hashtable<String,Integer> getTypeStats() {
		Hashtable<String,Integer> h = new Hashtable<String,Integer>();
		for (String url : pageLinks.keySet()) {
			// Only look for a dot within the last 5 characters, so an
			// extension is at most 4 characters long.
			int dotidx = url.indexOf('.', url.length() - 5);
			String k;
			if (url.indexOf('?') > 0) k = "dynamic";
			else if (dotidx > 0) k = url.substring(dotidx + 1);
			else k = "other";

			Integer v = h.get(k);
			h.put(k, (null == v) ? 1 : v + 1);
		}
		return h;
	}

	/**
	 * Accumulate document frequencies for this node's pages, then recurse into
	 * the children.  Each URL contributes at most once, no matter how many
	 * samples contain it.
	 * @param visited The set of all URLs that have already been visited (will be updated).
	 * @param df The current document-frequency information (will be updated).
	 */
	protected void getDocumentFrequencies(TreeSet<String> visited, TreeMap<String, Integer> df) {
		for (String thisURL : pageLinks.keySet()) {
			l.trace("Retrieving terms for " + thisURL);
			// add() returns false when the URL was already present.
			if (!visited.add(thisURL)) {
				l.debug("second visit to " + thisURL);
				continue;
			}
			Set<String> terms = this.getTermsFor(thisURL);
			if (null == terms) {
				// Bug fix: the original called .iterator() on this possibly-null
				// result and crashed for any page whose fetch had failed.
				l.warn("No terms cached for " + thisURL + ": skipping");
				continue;
			}
			for (String thisTerm : terms) {
				Integer current = df.get(thisTerm);
				df.put(thisTerm, (null == current) ? 1 : current + 1);
			}
		}
		for (DocumentSample child : children) {
			child.getDocumentFrequencies(visited, df);
		}
	}

	/**
	 * Write a term-frequency map to a file, one "term: count" line per entry.
	 * @param sampleMap the map to dump, in key order
	 * @param filename path of the output file (overwritten if it exists)
	 * @throws IOException if the file cannot be created or written
	 */
	public static void printSample(TreeMap<String,Integer> sampleMap, String filename) throws IOException {
		BufferedWriter o = new BufferedWriter(
				new OutputStreamWriter(new FileOutputStream(new File(filename))));
		try {
			for (Entry<String,Integer> e : sampleMap.entrySet()) {
				o.write(String.format("%s: %d\n", e.getKey(), e.getValue()));
			}
		} finally {
			// Bug fix: the original leaked the writer if any write() threw.
			o.close();
		}
	}

	/**
	 * Smoke test: fetch a PDF (the first command-line argument, or a default
	 * course reading) and print its extracted terms, one per line.
	 */
	public static void main(String args[]) {
		DocumentSample s = new DocumentSample();
		String url = args.length > 0 ? args[0] :
			"http://www.cs.columbia.edu/6111/20091/Readings/zobel06.pdf";
		for (String t : s.getPDFTerms(url)) {
			System.out.println(t);
		}
	}

}
