import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.UUID;

/**
 * Maintains the global URL -> cached-file map and builds per-node content
 * summaries: it walks a category tree (bottom-up via each node's parent),
 * downloads the top sampled documents per query into a local cache, and
 * builds/writes an index per node.
 *
 * NOTE(review): {@code Node} and {@code getWordsLynx} are project classes not
 * visible here; their contracts are assumed from usage.
 *
 * Not thread-safe: all state is a single static HashMap.
 */
public class ContentSummary {
    /* ======================================================
     * MEMBERS
     * ====================================================== */
    // Maps each downloaded URL to its cache-file location on disk.
    private static HashMap<String, String> _documents = new HashMap<String, String>();

    /* ======================================================
     * METHODS
     * ====================================================== */

    /** Returns the live URL -> cache-file map (callers mutate it directly). */
    public static HashMap<String, String> getDocuments() {
        return _documents;
    }

    // BUILD UNIQUE DOCS
    /**
     * At each node (walking up to the root), builds the set of unique documents
     * that comprises the full index for that node: the uniques handed up from
     * the child plus the top 4 sampled URLs of each of this node's queries.
     *
     * @param myNode  current node; recursion stops at null (above the root)
     * @param uniques unique docs accumulated by the child node
     */
    public static void buildUniqueDocs(Node myNode, HashMap<String, String> uniques) {
        if (myNode == null) {
            return;
        }
        // Merge the unique docs passed up from the child.
        for (String url : uniques.keySet()) {
            myNode.addUniqueURL(url);
        }
        // Add this node's own sample: only the top 4 results per query count.
        for (String query : myNode.getDocumentSample().keySet()) {
            int count = 0;
            for (String url : myNode.getDocumentSample().get(query)) {
                if (count >= 4) {
                    break; // remaining results never qualify; no need to scan them
                }
                myNode.addUniqueURL(url);
                count++;
            }
        }
        buildUniqueDocs(myNode.getParent(), myNode.getUniqueDocs());
    }

    /** Prints every URL currently recorded in the global documents map. */
    public static void printDocs() {
        for (String URL : _documents.keySet()) {
            System.out.println(URL);
        }
    }

    //we can kill this later..
    /** Debug helper: prints each node's category and unique docs up to the root. */
    public static void testIndex(Node myNode) {
        if (myNode == null) {
            return;
        }
        System.out.println(myNode.getCategory());
        for (String URL : myNode.getUniqueDocs().keySet()) {
            System.out.println(URL);
        }
        testIndex(myNode.getParent());
    }

    // BUILD INDEX
    /**
     * Builds each node's index from the cached files of its unique docs,
     * walking up to the root. URLs with no cached file are reported and
     * skipped. Not the most efficient approach, but acceptable for now.
     */
    @SuppressWarnings("unchecked")
    public static void buildIndex(Node myNode) {
        if (myNode == null) {
            return;
        }
        for (String URL : myNode.getUniqueDocs().keySet()) {
            if (_documents.containsKey(URL)) {
                // getWordsLynx extracts the words from the cached file.
                (myNode.getIndex()).build(getWordsLynx.runLynx(_documents.get(URL)));
            } else {
                System.out.println("ERROR - No URL-File match");
                System.out.println("URL: " + URL);
                System.out.println("Category: " + myNode.getCategory());
            }
        }
        buildIndex(myNode.getParent());
    }

    // WRITE INDEX
    /**
     * Writes each non-empty node index to "&lt;category&gt;-&lt;DB&gt;.txt",
     * walking up to the root.
     */
    public static void writeIndex(Node myNode, String DB) {
        if (myNode == null) {
            return;
        }
        if (!myNode.getIndex().IsEmpty()) {
            (myNode.getIndex()).print(myNode.getCategory() + "-" + DB + ".txt");
        }
        writeIndex(myNode.getParent(), DB);
    }

    // GET DOCUMENT SAMPLES
    /**
     * Downloads the top 4 sample documents for each query of this node (leaves
     * are skipped — they need no sample), merges in the child's sample, and
     * recurses bottom-up through the parents.
     *
     * @param category node whose document sample is downloaded
     * @param child    previously processed child, or null at the starting node
     */
    public static void downloadDocSamples(Node category, Node child) {
        // If this is a leaf, no need to download a sample.
        if (category.hasChildren()) {
            System.out.println("Creating content summary for: " + category.getCategory());
            int count = 0;

            // The child's document sample already contains the samples of the
            // subcategories visited during classification, so merge it in.
            if (child != null) {
                category.getDocumentSample().putAll(child.getDocumentSample());
                category.getUniqueDocs().putAll(child.getUniqueDocs());
            }

            for (String query : category.getDocumentSample().keySet()) {
                count++;
                if (category.getDocumentSample().get(query).size() > 0) {
                    System.out.println(count + "/" + category.getDocumentSample().size());
                }
                // For each query, download the top 4 usable URLs.
                int docCount = 0;
                for (String url : category.getDocumentSample().get(query)) {
                    if (docCount >= 4) {
                        break;
                    }
                    // Ignore pdf/ppt/doc files: they create a lot of noise words.
                    if (hasNoisyExtension(url)) {
                        continue;
                    }
                    if (!_documents.containsKey(url)) {
                        String filename = cacheFilename(url);
                        if (filename == null) {
                            // Query string hid a pdf/ppt/doc target; skip it.
                            continue;
                        }
                        System.out.println("Getting: " + url);
                        if (new File(filename).exists()) {
                            // Already cached from an earlier run: just record the
                            // mapping so later lookups find the file.
                            _documents.put(url, filename);
                        } else if (downloadTo(url, filename)) {
                            _documents.put(url, filename);
                        } else {
                            // FIX: a URL that failed to download (including a
                            // malformed URL, which previously fell through) must
                            // not be counted among the node's unique docs.
                            continue;
                        }
                    }
                    category.addUniqueURL(url);
                    docCount++;
                }
            }
        }

        // Recurse from bottom up.
        if (category.getParent() != null) {
            downloadDocSamples(category.getParent(), category);
        }
    }

    /**
     * True when the name ends in a ".pdf"/".ppt"/".doc" extension (any case).
     * FIX: checks the dotted extension, not a bare "pdf" suffix which also
     * matched paths like ".../downloadpdf".
     */
    private static boolean hasNoisyExtension(String name) {
        String lower = name.toLowerCase();
        return lower.endsWith(".pdf") || lower.endsWith(".ppt") || lower.endsWith(".doc");
    }

    /**
     * Derives the on-disk cache path for a URL. URLs that end at a directory,
     * carry a query string, or lack a file extension get a unique UUID-based
     * ".html" name (after stripping the query string).
     *
     * @return the cache path, or null when the stripped target is a
     *         pdf/ppt/doc that must be skipped
     */
    private static String cacheFilename(String url) {
        String filename = "cache/documents/" + url.replace("http://", "").replace("https://", "");
        if (filename.endsWith("/") || filename.indexOf("?") > -1
                || filename.indexOf(".", filename.lastIndexOf("/")) == -1) {
            int q = filename.indexOf("?");
            if (q > -1) {
                // Strip out the query string.
                filename = filename.substring(0, q);
            }
            // Re-check pdf/ppt/doc now that any query string is gone.
            if (hasNoisyExtension(filename)) {
                return null;
            }
            // Assign a unique filename.
            filename = filename.substring(0, filename.lastIndexOf("/")) + "/" + UUID.randomUUID() + ".html";
        }
        return filename;
    }

    /**
     * Downloads a URL into the given file, creating parent directories as
     * needed, with a short politeness delay before the request.
     *
     * @return true on success; false on any failure (the partial file, if any,
     *         is left behind as before)
     */
    private static boolean downloadTo(String url, String filename) {
        try {
            Thread.sleep(750); // crude rate limit between requests
        } catch (InterruptedException e) {
            // FIX: restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
            return false;
        }

        // Create the directory if it does not already exist.
        String dirs = filename.substring(0, filename.lastIndexOf("/"));
        File dir = new File(dirs);
        if (!dir.exists()) {
            dir.mkdirs();
        }

        InputStream is = null;
        BufferedReader br = null;
        BufferedWriter out = null;
        try {
            // FIX: open the connection before the writer so a bad URL no
            // longer leaks an open FileWriter.
            is = new URL(url).openStream();
            br = new BufferedReader(new InputStreamReader(is));
            out = new BufferedWriter(new FileWriter(filename));
            // Write out the file contents.
            String s;
            while ((s = br.readLine()) != null) {
                out.write(s);
                out.newLine();
            }
            return true;
        } catch (MalformedURLException e) {
            System.out.println(e.getMessage());
            return false;
        } catch (IOException e) {
            System.out.println(e.getMessage());
            return false;
        } catch (Exception e) {
            System.out.println(e.getMessage());
            return false;
        } finally {
            // FIX: close quietly — the old code used `continue` inside the
            // finally block, which silently discarded pending exceptions.
            // Also close the BufferedReader, which was never closed before.
            try {
                if (br != null) {
                    br.close();
                }
            } catch (IOException ignored) {
                // best-effort close; nothing useful to do here
            }
            try {
                if (is != null) {
                    is.close();
                }
            } catch (IOException ignored) {
                // best-effort close
            }
            try {
                if (out != null) {
                    out.close();
                }
            } catch (IOException ignored) {
                // best-effort close
            }
        }
    }
}
