package core.proto.inforet;

import java.io.*;
import java.util.*;
import java.util.Map.Entry;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.lang.Math;
import static app.constants.CollabVizConstants.*;
import net.htmlparser.jericho.TextExtractor;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.FilenameUtils;

import app.exceptions.CollabVizException;
import app.util.PropertyLoaderUtil;

import core.proto.Util;
import core.proto.search.HtmlTextExtractor;

import Jama.Matrix;

/**
 * Class for holding info on a word, such as document frequency and term frequency. 
 */
class FreqInfo{
	// Zero-based index of this word == its row in the term document matrix. 
	int wordIndex;
	// Number of distinct documents this word appears in (see incrementDocFreq()). 
	int docFreq; 
	// Maps a document id to this word's raw term frequency in that document. 
	Map<Integer, Integer> docIdToTermFreqMap;

	public FreqInfo(int wordIndex){
		this.wordIndex = wordIndex;
		this.docFreq = 0;
		this.docIdToTermFreqMap = new HashMap<Integer, Integer>();
	}

	/**
	 * Adds one to this word's term frequency for the given document 
	 * (starting the count at 1 on first sight of the document). 
	 */
	public void incrementTermFreq(int docId){
		// Single lookup instead of containsKey + get; autoboxing replaces the 
		// deprecated new Integer(...) / explicit intValue() dance. 
		Integer current = docIdToTermFreqMap.get(docId);
		docIdToTermFreqMap.put(docId, current == null ? 1 : current + 1);
	}

	public int getWordIndex(){
		return this.wordIndex;
	}

	public Map<Integer, Integer> getDocIdToTermFreqMap(){
		return docIdToTermFreqMap;
	}

	/**
	 * Adds one to the document frequency. The caller is responsible for 
	 * invoking this at most once per document. 
	 */
	public void incrementDocFreq(){
		this.docFreq += 1;
	}

	public int getDocFreq(){
		return docFreq;
	}
}

/**
 * Class for generating the similarity matrix file: scans a folder of html 
 * documents, builds a tf-idf weighted, length-normalized term document matrix, 
 * and writes a GraphML file with one node per document and one edge per pair 
 * of documents with positive cosine similarity. 
 */
public class SimilarityMatrixUtil {
	/**
	 * How much to scale the weight for the visualization edges. 
	 */
	static final double edgeWeightScale = 1000;
	/**
	 * words must have this syntax.
	 */
	static final String acceptedRegex = "\\w+(:\\w+)?";  
	/**
	 * A word's first character must match this, i.e. must be alphabetic. 
	 */
	static final String alphaRegex = "[A-Za-z]"; 
	/**
	 * Tokenizer uses this to split text up. 
	 */
	static final String splitRegex = "[\\s.,/-]+";	
	/**
	 * Tokens shorter than this are discarded. 
	 */
	static final int minWordLen = 3;
	/**
	 * Separator used when flattening a sorted word list into one filter string. 
	 */
	static final String delimiter = "###";

	// We won't use other (more modular/fancier) means of xml generation. 
	// If need to change xml file structure, just change these string constants or 
	// one of the writeXml*() methods. 
	// NOTE(review): two <key> elements share id="name" (one for node, one for 
	// edge); strict GraphML requires unique key ids -- confirm downstream 
	// consumers tolerate this before changing the schema. 
	static final String xmlTail = "\t</graph>\n</graphml>";
	static final String xmlHead = 
		"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
		"<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns\">\n" + 
		"\t<graph edgedefault=\"undirected\">\n" + 
		"\t\t<!-- data schema -->\n" + 
		"\t\t<key id=\"name\" for=\"node\" attr.name=\"name\" attr.type=\"string\" />\n" + 
		"\t\t<key id=\"type\" for=\"node\" attr.name=\"type\" attr.type=\"string\" />\n" + 
		"\t\t<key id=\"path\" for=\"node\" attr.name=\"path\" attr.type=\"string\" />\n" + 
		"\t\t<key id=\"weight\" for=\"edge\" attr.name=\"weight\" attr.type=\"double\" />\n" + 
		"\t\t<key id=\"name\" for=\"edge\" attr.name=\"name\" attr.type=\"string\" />\n";

	/**
	 * Entry point: reads the stop list (and optional ignore list), collects 
	 * term/document frequencies over all html files under the given folder, 
	 * builds the normalized tf-idf matrix, and writes the GraphML output. 
	 * @param args stoplistFilePath, htmlFileFolder, outputFilePath, and an 
	 *        optional ignorelistFilePath (see the usage message). 
	 */
	public static void main(String[] args) throws IOException {
		if (args.length != 3 && args.length != 4){
			System.err.println("Usage: java " + SimilarityMatrixUtil.class.getName() + " <stoplistFilePath> <htmlFileFolder> <outputFilePath> [ignorelistFilePath]");
			System.err.println("\tignorelistFilePath : optional newline delimited textfile, each line represents a set of words to ignore that can occur in an edge");
			System.exit(-1);
		}

		// Initialize various patterns for deciding whether to include a token. 
		Matcher alphaMatcher = Pattern.compile(alphaRegex).matcher("");
		Matcher acceptedMatcher = Pattern.compile(acceptedRegex).matcher("");

		// Init the stop list. 
		Set<String> stopWords = new HashSet<String>();
		readWordList(args[0], stopWords);
		// Init the ignore list. 
		Set<String> ignoreWords = new HashSet<String>();
		if (args.length == 4){
			readIgnoredEdges(args[3], ignoreWords);
		}
		// Go through html files, and collect statistics. 
		String[] htmlExtension = {"html", "htm"};
		Set<String> encountered = new HashSet<String>();	// what words have been encountered in current document so far. 
		Iterator<File> fileIter = FileUtils.iterateFiles(new File(args[1]), htmlExtension, true);

		Map<String, Integer> docPathToIdMap = new HashMap<String, Integer>(); // document ID = its zero based index in the term document matrix.  
		Map<String, FreqInfo> wordToFreqInfo = new HashMap<String, FreqInfo>();
		while(fileIter.hasNext()){
			encountered.clear();	

			File htmlFile = fileIter.next();
			int docId = docPathToIdMap.size();
			
			// We use the document name, e.g. use "Fisk" for "C://blah//Fisk.html", as the key, instead of 
			// hard coding the absolute path, so that the index is portable and 
			// does not need to be regenerated all the time. 
			// NOTE(review): two files sharing a base name would collide on this 
			// key and corrupt the docId numbering -- assumed not to occur; 
			// verify against the input corpus. 
			docPathToIdMap.put(FilenameUtils.getBaseName(htmlFile.getAbsolutePath()), docId);

			TextExtractor extractor = HtmlTextExtractor.createExtractor(htmlFile);
			String[] tokens = tokenize(extractor.toString());

			for(String s : tokens){
				s = s.toLowerCase();
				// Bug fix: split() yields a leading empty token when the text 
				// starts with a delimiter, which made charAt(0) below throw. 
				if (s.length() == 0){
					continue;
				}
				// reset pattern matchers to use current word value. 
				alphaMatcher.reset(String.valueOf(s.charAt(0)));
				acceptedMatcher.reset(s);
				// decide whether to include this word. 
				if (! stopWords.contains(s) && alphaMatcher.matches() && acceptedMatcher.matches() && s.length() >= minWordLen){
					FreqInfo info = wordToFreqInfo.get(s);
					if (info == null){
						info = new FreqInfo(wordToFreqInfo.size());
						wordToFreqInfo.put(s, info);
					}
					// Increment the doc frequency only on the word's first 
					// occurrence in this document; add() returns true then. 
					// Bug fix: the word was never recorded in 'encountered', so 
					// docFreq used to count every occurrence, skewing the idf. 
					if (encountered.add(s)){
						info.incrementDocFreq();
					}
					// increment the term freq. 
					info.incrementTermFreq(docId);
				}
			}
		}
		Map<Integer, String> wordIndexToWordMap = new HashMap<Integer, String>(); 

		// Create the term document matrix (rows = words, columns = documents). 
		int totalDocs = docPathToIdMap.size();
		int totalWords = wordToFreqInfo.size();
		Matrix termDocMatrix = new Matrix(totalWords, totalDocs, 0);
		for(Entry<String, FreqInfo> wordInfoTuple : wordToFreqInfo.entrySet()){
			FreqInfo info = wordInfoTuple.getValue();

			wordIndexToWordMap.put(info.getWordIndex(), wordInfoTuple.getKey());
			for(Entry<Integer, Integer> docIdTfTuple : info.getDocIdToTermFreqMap().entrySet()){
				termDocMatrix.set(info.getWordIndex(), 
						docIdTfTuple.getKey(), 
						computeTf(docIdTfTuple.getValue()) * computeIdf(totalDocs, info.getDocFreq()));
			}
		}
		normalize(termDocMatrix);

		// Output the file. 
		PrintWriter writer = null;
		File output = new File(args[2]);
		if (output.exists() && output.isFile()){
			output.delete();
		}
		try{
			List<String> docPathSorted = getSortedDocName(docPathToIdMap);

			writer = new PrintWriter(new FileWriter(output), true);
			writeXmlHeader(writer);
			writeXmlNodeInfo(writer, docPathSorted, args[1]);

			List<String> commonWords = new LinkedList<String>();
			// Finally compute similarity between all pairs of documents.
			for(int i = 0; i < termDocMatrix.getColumnDimension() - 1; ++i){
				for(int j=i+1; j < termDocMatrix.getColumnDimension(); ++j){
					commonWords.clear();
					double similarity = computeSimilarity(i, j, termDocMatrix);
					computeCommonWords(i, j, termDocMatrix, wordIndexToWordMap, commonWords);
					if (similarity > 0 && shouldAddEdge(commonWords, ignoreWords)){
						// Map cosine similarity in (0, 1] onto an integer edge weight >= 1. 
						writeXmlEdgeInfo(writer, i, j, 1 + (int) (similarity * 100), commonWords);
					}
				}
			}
			writeXmlTail(writer);
		}finally{
			if (writer != null)
				writer.close();
		}
	}

	/**
	 * Returns true iff the edge has at least one common word and its filter 
	 * string (sorted words joined by the delimiter) is not in the ignore set. 
	 */
	static boolean shouldAddEdge(List<String> commonWords, Set<String> ignoreEdges){
		if (commonWords.size() == 0)
			return false;
		String filterString = buildFilterString(commonWords, new StringBuilder());
		return ! ignoreEdges.contains(filterString);
	}

	/**
	 * Returns the document names ordered by their docId, so that list index i 
	 * corresponds to matrix column i. 
	 */
	static List<String> getSortedDocName(Map<String, Integer> docPathToIdMap){
		List<String> result = new ArrayList<String>();
		List<Entry<String, Integer>> sorted = new LinkedList<Entry<String, Integer>>(docPathToIdMap.entrySet());
		Collections.sort(sorted, 
				new Comparator<Entry<String, Integer>>(){
			public int compare(Entry<String, Integer> e1, Entry<String, Integer> e2){
				return e1.getValue().compareTo(e2.getValue());
			}
		});
		for(Entry<String, Integer> entry : sorted){
			result.add(entry.getKey());
		}
		return result;
	}

	/**
	 * Debug helper: dumps the per-document tf-idf scores to stdout. 
	 * @param ignoreZeros If true, rows whose score is exactly 0 are skipped. 
	 */
	static void printMatrix(Matrix matrix, List<String> docPathSorted, Map<Integer, String> wordIndexToWord, boolean ignoreZeros){
		for(int i=0; i<matrix.getColumnDimension(); ++i){
			System.out.println(docPathSorted.get(i));
			for(int j=0; j<matrix.getRowDimension(); ++j){
				if (! (ignoreZeros && Double.compare(matrix.get(j, i), 0) == 0)){ 
					System.out.println(
							String.format(
									"\t%s : tf-idf=%s", 
									wordIndexToWord.get(Integer.valueOf(j)), 
									matrix.get(j, i)
							)
					);
				}
			}
			System.out.println();
		}
	}

	static void writeXmlHeader(PrintWriter writer){
		writer.println(xmlHead);
	}

	/**
	 * Writes one &lt;node&gt; element per document, in docId order, carrying the 
	 * document name, a nominal category ("type"), and its folder-relative path. 
	 */
	static void writeXmlNodeInfo(PrintWriter writer, List<String> docPathSorted, String htmlFileFolder){

		int count = 0;
		for(String entry : docPathSorted){
			// The path = htmlFolderAbsPath | rest, where | means concat. 
			// For compabilitiy reasons with other parts of the system that use paths as 
			// the key to identify documents, we have to store the 'rest' part of the path. 
			String path = entry.replace(Util.removeDoubleSlash(htmlFileFolder), "");
			// This is the other not so modular part of our system. 
			// We need to classify html files into nominal categories so that they 
			// can be color coded when rendered, and yet this info is not stored 
			// anywhere in the html file itself. Thus, we rely on the hack below to 
			// give COLD cases their own colors, and the non-cold cases we can 
			// use their parent folder name as the category. 
			String type = FilenameUtils.getBaseName(new File(entry).getParent());
			if (type.toLowerCase().contains("cold")){
				type = FilenameUtils.getBaseName(entry);
			}
			// Finally, we can write out the stuff. 
			// NOTE(review): values are not xml-escaped; safe only while names 
			// and paths contain no markup characters -- verify. 
			writer.println("\t\t<node id=\"" + count + "\">");
			writer.println("\t\t\t<data key=\"name\">" + FilenameUtils.getBaseName(entry)+ "</data>");
			writer.println("\t\t\t<data key=\"type\">" + type + "</data>");
			writer.println("\t\t\t<data key=\"path\">" + path + "</data>");
			writer.println("\t\t</node>");
			++count;
		}
	}

	/**
	 * Writes one &lt;edge&gt; element with the given weight; the edge name is the 
	 * comma separated list of words common to the two documents. 
	 */
	static void writeXmlEdgeInfo(PrintWriter writer, int src, int tgt, int weight, List<String> commonWords){
		writer.println("\t\t<edge source=\"" + src + "\" target=\"" + tgt + "\">");
		writer.println("\t\t\t<data key=\"weight\">" + weight + "</data>");
		// Join the common words with commas (no trailing comma). 
		StringBuilder builder = new StringBuilder();
		for(String s : commonWords){
			if (builder.length() > 0){
				builder.append(",");
			}
			builder.append(s);
		}
		writer.println("\t\t\t<data key=\"name\">" + builder.toString() + "</data>");
		writer.println("\t\t</edge>");
	}

	static void writeXmlTail(PrintWriter writer){
		writer.println(xmlTail);
	}

	/**
	 * Picks words common between documents corresponding to given columns. Word is 
	 * common iff its normalized tf-idf score is greater than 0 in both documents (
	 * intuition is that the word is relevant in both documents, and since tf-idf of 
	 * a word is 0 if word doesn't occur in a document, we cover the case where 
	 * word is not present in the document). 
	 * @param col1 Index of 1st document in matrix. 
	 * @param col2 Index of 2nd document in matrix. 
	 * @param matrix Term document matrix. 
	 * @param result Collector of common words. 
	 * @param wordMap Map of matrix-index of word to the word string. 
	 * @return Total number of words in the union of the two documents. 
	 */
	static int computeCommonWords(int col1, int col2, Matrix matrix, Map<Integer, String> wordMap, List<String> result){
		Matrix vector1 = matrix.getMatrix(0, matrix.getRowDimension()-1, col1, col1);
		Matrix vector2 = matrix.getMatrix(0, matrix.getRowDimension()-1, col2, col2);
		int totalNumWords = 0;
		for(int r=0; r<matrix.getRowDimension(); ++r){
			if (vector1.get(r, 0) != 0 || vector2.get(r, 0) != 0)
				++totalNumWords;
			// we only consider two words common if they are relevant. 
			if (vector1.get(r, 0) > 0 && vector2.get(r, 0) > 0){
				result.add(wordMap.get(r));
			}
		}
		return totalNumWords;
	}

	/**
	 * Debug helper: prints the matrix dimensions as "rowsxcols". 
	 */
	static void printDimension(Matrix matrix){
		System.out.println(String.format("%sx%s", matrix.getRowDimension(), matrix.getColumnDimension()));
	}

	/**
	 * Dot product of columns v1 and v2; since columns are unit length after 
	 * {@link #normalize(Matrix)}, this is the cosine similarity. 
	 */
	static double computeSimilarity(int v1, int v2, Matrix matrix){
		Matrix termVector1 = matrix.getMatrix(0, matrix.getRowDimension()-1, v1, v1).transpose();
		Matrix termVector2 = matrix.getMatrix(0, matrix.getRowDimension()-1, v2, v2);
		Matrix dotProduct = termVector1.times(termVector2);
		// sanity check.
		if (dotProduct.getRowDimension() != 1 || dotProduct.getColumnDimension() != 1){
			throw new Error(
					String.format(
							"Something wrong with the dot product computation, dimensions are %sx%s",
							dotProduct.getRowDimension(), 
							dotProduct.getColumnDimension()
					)
			);
		}
		return dotProduct.get(0, 0);
	}

	/**
	 * Inverse document frequency: log10(totalDocs / docFreq), or 0 for an 
	 * unseen word. 
	 */
	static double computeIdf(int totalDocs, int docFreq){
		if (docFreq == 0)
			return 0;
		return Math.log10(1.0 * totalDocs / docFreq);
	}

	/**
	 * Normalizes every column of the matrix to unit euclidean length, in place. 
	 * All-zero columns (e.g. an empty document) are left untouched. 
	 */
	static void normalize(Matrix matrix){
		for(int c = 0; c < matrix.getColumnDimension(); ++c){
			double euclideanLength = 0;
			for(int r = 0; r < matrix.getRowDimension(); ++r){
				euclideanLength += matrix.get(r, c) * matrix.get(r, c);
			}
			euclideanLength = Math.sqrt(euclideanLength);
			// Bug fix: dividing an all-zero column by its zero length filled it 
			// with NaN, which then poisoned every similarity involving it. 
			if (euclideanLength == 0)
				continue;
			// finally, we normalize. 
			for(int r = 0; r < matrix.getRowDimension(); ++r){
				matrix.set(r, c, matrix.get(r, c) / euclideanLength);
			}
		}
	}

	/**
	 * Calculation using sublinear tf scaling: 1 + log10(tf), or 0 when absent. 
	 */
	static double computeTf(int tf){
		if (tf == 0)
			return 0;
		return 1 + Math.log10(tf);
	}

	/**
	 * Tokenizes using regex. 
	 * @param input
	 * @return Split using the {@link #splitRegex}. 
	 */
	static String[] tokenize(String input){
		return input.split(splitRegex);
	}

	/**
	 * Builds the canonical filter string for a set of words: sorts them and 
	 * joins them with {@link #delimiter}. 
	 * @param input This is sorted in the process of building the string. 
	 * @param builder Used to construct the filter string; cleared first. 
	 * @return The canonical filter string. 
	 */
	static String buildFilterString(List<String> input, StringBuilder builder){
		builder.setLength(0);
		Collections.sort(input);
		for(String s : input){
			builder.append(s);
			builder.append(delimiter);
		}
		return builder.toString();
	}

	/**
	 * Input is the file of newline separated strings representing sets of words
	 * on edges to ignore. Each line is split on whitespace and converted into 
	 * the canonical filter string so it can be matched in shouldAddEdge(). 
	 * @param wordSetFilePath
	 * @param collector
	 */
	static void readIgnoredEdges(String wordSetFilePath, Set<String> collector){
		Set<String> lines = new HashSet<String>();
		readWordList(wordSetFilePath, lines);
		List<String> words = new LinkedList<String>();  
		StringBuilder builder = new StringBuilder();
		for(String s : lines){
			String[] tokens = s.split("\\s+");
			// readWordList() only yields non-empty trimmed lines, so split() 
			// always returns at least one token; the else branch is a safety net. 
			if (tokens.length > 0){
				words.clear();
				words.addAll(Arrays.asList(tokens));
				collector.add(buildFilterString(words, builder));
			}else{
				collector.add(s);
			}
		}
	}

	/**
	 * Reads the newline separated word list into the given set; lines are 
	 * trimmed and lower-cased, and blank lines are skipped. 
	 * NOTE(review): reads with the platform default charset -- confirm inputs 
	 * are ascii/platform encoded. 
	 * @param wordListFilePath
	 * @param collector
	 */
	static void readWordList(String wordListFilePath, Set<String> collector){
		BufferedReader reader = null;
		try{
			reader = new BufferedReader(new InputStreamReader(new FileInputStream(wordListFilePath)));
			String stopWord;
			while ((stopWord = reader.readLine()) != null){
				stopWord = stopWord.trim();
				if (stopWord.length() > 0){
					collector.add(stopWord.toLowerCase());
				}
			}
		} catch (IOException e) {
			throw new Error(e);
		}finally{
			try {
				// Closing the reader also closes the underlying stream. 
				if (reader != null)
					reader.close();
			} catch (IOException ignored) {
				// best-effort close; nothing sensible to do here. 
			}
		}
	}
} 
