/** LSH NN Search
 *  Implementation based on Chapter 3 of Rajaraman, Anand, and Jeffrey David Ullman.
 *  Mining of massive datasets. Cambridge University Press, 2011.
 * Credit to Sahil Thaker LSH Code on .NET for the second hashing part
 * http://blogs.msdn.com/b/spt/archive/2008/06/11/locality-sensitive-hashing-lsh-and-min-hash.aspx
 * 
 * @author Yong Boon, Lim (yongboon.lim@gmail.com)
 */

package core;

import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import search.FileFinder;
import util.DocUtils;

/**
 * Nearest-neighbor search backed by MinHash signatures and LSH banding.
 *
 * <p>Build phase: assign every stem word a unique id, min-hash every training
 * document into a signature of {@code _hashSize} values, then bucket each
 * signature band by the sum of its rows. Query phase: signature-hash the query
 * document, collect every file sharing a band bucket as a candidate, and return
 * the candidate whose signature agrees with the query's on the largest fraction
 * of positions (an estimate of Jaccard similarity).
 */
public class NNLSH extends NN {
	// MinHash Variables
	HashMap<String, Integer> _stemIdDict;		// Stem word -> unique integer id
	int _hashSize;								// Number of min-hash functions (signature length)
	int _noOfBand;								// Number of LSH bands
	int _noOfRowPerBand;						// Signature rows per band
	int _randomSeed = 123;						// Fixed seed keeps hash parameters reproducible
	Random _randomGen = new Random(_randomSeed);
	int[][] _hashFuncParameter;					// Per-hash-function (a, b) for (a*x + b) mod c
	Multimap<Integer, String> _lshBucket;		// Band-sum bucket key -> file paths hashed there
	HashMap<String, Integer[]> _lshFile;		// File path -> min-hash signature
	// ZDD
	// Lucene


	/**
	 * @param trainDataDir   directory containing the training documents
	 * @param hashSize       number of min-hash functions (signature length)
	 * @param noOfRowPerBand rows per LSH band; must be positive and should divide
	 *                       hashSize evenly (any remainder rows are ignored)
	 * @throws IllegalArgumentException if noOfRowPerBand is not positive
	 */
	public NNLSH(String trainDataDir, int hashSize, int noOfRowPerBand) {
		super(trainDataDir);
		if (noOfRowPerBand <= 0) {
			// Guard the integer division below against divide-by-zero / nonsense bands.
			throw new IllegalArgumentException("noOfRowPerBand must be positive: " + noOfRowPerBand);
		}
		_nnDescShort = "NN-LSH-MINHASH-" + Integer.toString(hashSize);
		_nnDescLong = "Nearest Neighbor with LSH MinHash";
		_hashSize = hashSize;
		_noOfRowPerBand = noOfRowPerBand;
		_noOfBand = _hashSize / _noOfRowPerBand;	// trailing hashSize % noOfRowPerBand rows are unused
	}	

	@Override
	public void init() {
		super.init();
		_stemIdDict = new HashMap<String, Integer>();
		_lshBucket = ArrayListMultimap.create();
		_lshFile = new HashMap<String, Integer[]>();
	}
	
	// Scan every training doc's stem words and assign each distinct word a
	// sequential unique id, stored in _stemIdDict.
	protected void generateStemId() {
		ArrayList<File> files = FileFinder.GetAllFiles( _trainDataDir, "", true);
		HashMap<String, Integer> stemIdDict = new HashMap<String, Integer>();
		int stemId = 0;

		for (File f : files) {
			String fileContent = DocUtils.ReadFile(f);
			Map<Object, Double> wordCount = DocUtils.ConvertToFeatureMapFilter(fileContent);
			
			for (Map.Entry<Object, Double> me : wordCount.entrySet()) {
				if (!stemIdDict.containsKey((String)me.getKey())) {
					stemIdDict.put((String)me.getKey(), stemId);
					stemId++;
				}
			}
		}
		
		_stemIdDict = stemIdDict;
	}

	// Generate parameters a and b for each hash function (a*x + b) mod c,
	// where c is fixed later as the vocabulary size.
	public void generateHashFuncPar() {
		_hashFuncParameter = new int[_hashSize][2];
		for (int i = 0; i < _hashSize; i++) {
			_hashFuncParameter[i][0] = _randomGen.nextInt(3 * _hashSize) + 1;	// multiplier in [1, 3 * _hashSize]
			_hashFuncParameter[i][1] = _randomGen.nextInt(10) + 1;				// additive term in [1, 10]
		}
	}
	
	// Universal-style hash: (a*x + b) % c
	protected int hashFunction(int a, int b, int c, int x) {
		return (a * x + b) % c;
	}
	
	// A fresh signature with every slot at MAX_VALUE, so the first real hash
	// value always replaces it (min-hash takes the minimum per function).
	private Integer[] initHashFuncValueFile(int hashSize) {
		Integer[] hashFuncValueFile = new Integer[hashSize];
		for (int i = 0; i < hashSize; i++) {
			hashFuncValueFile[i] = Integer.MAX_VALUE;
		}
		return hashFuncValueFile; 		
	}		
	
	/**
	 * Compute the min-hash signature of a file: for each hash function, the
	 * minimum hash value over all stem words the file contains.
	 *
	 * @param f document to hash
	 * @return signature of length _hashSize
	 */
	protected Integer[] file2Hash(File f){
		String fileContent = DocUtils.ReadFile(f);
		Map<Object, Double> wordCount = DocUtils.ConvertToFeatureMapFilter(fileContent);
		Integer[] hashFuncValueFile = initHashFuncValueFile(_hashSize);
		int modulus = _stemIdDict.size();	// loop-invariant; hash values fall in [0, vocab size)
		for (int i = 0; i < _hashSize; i++) {
			for (Entry<Object, Double> e : wordCount.entrySet()) {
				Integer stemId = _stemIdDict.get((String)e.getKey());
				if (stemId == null) {
					// Word never seen during build (possible for query docs);
					// skip it instead of NPE-ing on unboxing.
					continue;
				}
				// Calculate hashValue using (a*x + b) % c
				int hashValue = hashFunction(_hashFuncParameter[i][0], _hashFuncParameter[i][1], modulus, stemId);
				// Keep the smallest value seen for this hash function
				if (hashFuncValueFile[i] > hashValue) {
					hashFuncValueFile[i] = hashValue;
				}
			}				
		}
		return hashFuncValueFile;
	}
	
	@Override
	public void build() {
		generateStemId();
		generateHashFuncPar();
		
		ArrayList<File> files = FileFinder.GetAllFiles( _trainDataDir, "", true);			
		
		for (File f : files) {
			Integer[] hashFuncValue = file2Hash(f);
			// NOTE(review): all bands share one bucket table (keys are band sums,
			// not keyed by band index), so different bands can collide with each
			// other. That only adds false candidates, never misses true ones.
			for (int bandIdx = 0; bandIdx < _noOfBand; bandIdx++) {
				int sumHash = 0;
				for (int rowOffset = 0; rowOffset < _noOfRowPerBand; rowOffset++) {
					sumHash += hashFuncValue[bandIdx * _noOfRowPerBand + rowOffset]; 
				}
				_lshBucket.put(sumHash, f.getPath());
			}
							
			// Keep the full signature for the candidate-scoring pass in query()
			_lshFile.put(f.getPath(), hashFuncValue);
		}
	}
	
	// Fraction of signature positions on which a and b agree — an estimate of
	// the Jaccard similarity of the underlying documents. Arrays are assumed to
	// have equal length (_hashSize).
	private double similarityJaccard(Integer[] a, Integer[] b) {
		int matchCount = 0;
		for (int i = 0; i < a.length; i++) {
			// equals(), not ==: == on boxed Integers compares references and is
			// only reliable inside the [-128, 127] autobox cache.
			if (a[i].equals(b[i])) {
				matchCount++;
			}
		}
		return 1.0 * matchCount / a.length;
	}
	
	/**
	 * Find the most similar indexed file to the given document.
	 *
	 * @param targetDataName path of the query document
	 * @return path of the best-matching indexed file, or "" when no band bucket
	 *         yields a candidate with similarity above zero
	 */
	public String query(String targetDataName) {
		File targetFile = new File(targetDataName);
		Integer[] hashFuncValue = file2Hash(targetFile);
		HashSet<String> candidate = new HashSet<String>();
		
		// Candidate generation: any file sharing at least one band bucket.
		for (int bandIdx = 0; bandIdx < _noOfBand; bandIdx++) {
			int sumHash = 0;
			for (int rowOffset = 0; rowOffset < _noOfRowPerBand; rowOffset++) {
				sumHash += hashFuncValue[bandIdx * _noOfRowPerBand + rowOffset]; 
			}
			
			if (_lshBucket.containsKey(sumHash)) {
				candidate.addAll(_lshBucket.get(sumHash));
			}
		}
		
		// Candidate scoring: pick the signature with the highest estimated
		// Jaccard similarity to the query's.
		String similarFilename = "";
		double bestSimilarity = 0.0;
		for (String s : candidate) {
			double similarity = similarityJaccard(_lshFile.get(s), hashFuncValue);
			if (similarity > bestSimilarity) {
				bestSimilarity = similarity;
				similarFilename = s;
			}
		}
		
		return similarFilename;
	}
	
	// Number of files indexed so far (0 before build()/after clear()).
	public int size() {
		return _lshFile == null ? 0 : _lshFile.size();
	}
	
	// Release all index state; the instance must be re-init()-ed and re-built
	// before further queries.
	public void clear() {
		_stemIdDict = null;
		_hashFuncParameter = null;
		_lshBucket = null;
		_lshFile = null;
	}
	
	/**
	 * @param args
	 */
	public static void main(String[] args) {
		// TODO Auto-generated method stub

	}

}
