/** Nearest Neighbor with ZDD LSH 
 * 
 * 
 * @author Yong Boon, Lim (yongboon.lim@gmail.com)
 */

package core;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;

import jdd.zdd.ZDD;

import com.google.common.collect.BiMap;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Table;

import search.FileIndexBuilder;
import search.FileIndexBuilderRaw;
import search.SimpleSearchRanker;
import search.SimpleSearchRankerKeywords;
import util.DocUtils;
import util.FileFinder;
import util.HammingDist;

public class NNZDDLSH extends Core {
	// ---- MinHash / LSH state ----
	HashMap<String, Integer> _stemIdDict;		// Stem word -> dense unique id (built by generateStemId)
	int _hashSize;								// Number of MinHash functions (signature length)
	int _noOfBand;								// Number of LSH bands (= _hashSize / _noOfRowPerBand)
	int _noOfRowPerBand;						// Signature rows grouped into each band
	int _randomSeed = 123;						// Fixed seed so hash parameters are reproducible across runs
	Random _randomGen = new Random(_randomSeed);
	int[][] _hashFuncParameter;					// Per-function (a, b) of h(x) = (a*x + b) mod |stem dict| (see file2Hash)
	HashMap<Integer, ZDDMeta> _lshBucket;		// Band hash value -> ZDD bucket holding signatures
	
	// ---- ZDD state ----
	// Per-LSH-bucket ZDD universe: each bucket owns its own ZDD node table,
	// its own signature-token -> variable mapping, and the root node of the
	// family of signatures stored in the bucket.
	private class ZDDMeta {
		int zddRoot;						// Root of the family of stored signatures
		ZDD zdd;							// Bucket-local ZDD node table
		BiMap<String, Integer> stemVarDict;	// Signature token ("i-v") -> ZDD variable
		BiMap<Integer, String> stemVarInvDict;	// Inverse view of stemVarDict (stays in sync automatically)

		private ZDDMeta() {
			// NOTE(review): fixed node-table/cache sizes — confirm jdd grows these on demand.
			zdd = new ZDD(1000, 100);
			stemVarDict = HashBiMap.create();
			stemVarInvDict = stemVarDict.inverse();
		}
	}
	HashMap<Integer, Boolean> _nodeMaxBranchDict;	// ZDD node -> took high branch? (written by reduceScore, read by extractSet)
	final int _zddBaseCost = 1;		// Score for reaching the base (accepting) terminal
	final int _zddEmptyCost = -2;	// Penalty for reaching the empty terminal
	
	// ---- Lucene state ----
	String _indexPath = "lucene.index";		// On-disk Lucene index directory
	String default_field = "CONTENT";		// Field the signature tokens are indexed under
	FileIndexBuilder _fileIndexBuilder;		// NOTE(review): never assigned in this file except to null in clear()
	SimpleSearchRanker _luceneSearch;		// Keyword searcher over the signature index (created in build)
	FileIndexBuilderRaw _fileIndexBuilderRaw;	// Writer used to index signature tokens per file
	
	/**
	 * Configures the LSH banding scheme: {@code hashSize} MinHash functions are
	 * split into bands of {@code noOfRowPerBand} rows each (integer division —
	 * any remainder rows are ignored by the banding loops).
	 *
	 * @param trainDataDir   directory of training documents, passed to Core
	 * @param hashSize       number of MinHash functions (signature length)
	 * @param noOfRowPerBand signature rows per LSH band
	 */
	public NNZDDLSH(String trainDataDir, int hashSize, int noOfRowPerBand) {
		super(trainDataDir);
		_nnDescShort = "NN-ZDD-LSH-LUC-" + Integer.toString(hashSize);
		_nnDescLong = "Nearest Neighbor with ZDD LSH (Using Lucene to retrieve doc)";
		_noOfRowPerBand = noOfRowPerBand;
		_hashSize = hashSize;
		_noOfBand = hashSize / noOfRowPerBand;
	}	
	
	/** Resets the mutable index state to empty maps before a (re)build. */
	@Override
	public void init() {
		_lshBucket = new HashMap<Integer, ZDDMeta>();
		_stemIdDict = new HashMap<String, Integer>();
	}
	
	// Scan every training document and assign a dense unique id (0, 1, 2, ...)
	// to each stem word on first encounter; the finished mapping replaces
	// _stemIdDict in one step.
	protected void generateStemId() {
		ArrayList<File> files = FileFinder.GetAllFiles( _trainDataDir, "", true);
		HashMap<String, Integer> freshDict = new HashMap<String, Integer>();
		int nextId = 0;

		for (File doc : files) {
			Map<Object, Double> wordCount =
					DocUtils.ConvertToFeatureMapFilter(DocUtils.ReadFile(doc));
			for (Object key : wordCount.keySet()) {
				String stem = (String) key;
				if (!freshDict.containsKey(stem)) {
					freshDict.put(stem, nextId++);
				}
			}
		}

		_stemIdDict = freshDict;
	}
	
	// Generate a and b for HashFunction (ax + b) mod hashSize
	public void generateHashFuncPar() {
		_hashFuncParameter = new int[_hashSize][2];
		for (int i = 0; i < _hashSize; i++) {
			_hashFuncParameter[i][0] = _randomGen.nextInt(3 * _hashSize) + 1;	// Generate the multiplier between 1 and 3 * _hashSize 
			_hashFuncParameter[i][1] = _randomGen.nextInt(10) + 1;		// Generate the addition term between 1 and 10
			
		}
	}

	// Encode one document's signature (a set of "i-v" tokens) as a single ZDD
	// combination inside the given bucket's ZDD universe. Unseen tokens get a
	// fresh ZDD variable (recorded in stemVarDict); the combination is built by
	// successively applying change() for each token starting from base().
	// Returns the ref-counted ZDD node for the resulting combination.
	public int topic2Zdd(Set<String> hashFuncValueFile, ZDDMeta zddMeta) {
		int zddTopic = zddMeta.zdd.base();
		for (String s : hashFuncValueFile) {
			// Allocate a variable the first time this token appears in this bucket.
			if (!zddMeta.stemVarDict.containsKey(s)) {
				int zddVar = zddMeta.zdd.createVar();
				zddMeta.stemVarDict.put(s, zddVar);
			}
			// changeWith refs the new node and derefs the previous intermediate.
			zddTopic = changeWith(zddTopic, zddMeta.stemVarDict.get(s), zddMeta);
		}
		return zddTopic;
	}
	
	// Universal-style hash h(x) = (a*x + b) mod c.
	// Fix: the original computed a*x in 32-bit arithmetic, which overflows
	// (a can be up to 3*_hashSize and x up to the stem-dictionary size) and
	// then % could yield negative values. Widening to long and using
	// Math.floorMod guarantees a result in [0, c).
	protected int hashFunction(int a, int b, int c, int x) {
		return (int) Math.floorMod((long) a * x + b, (long) c);
	}
	

	// Allocate a fresh per-file signature array with every slot preset to
	// Integer.MAX_VALUE, so any real hash value wins the first min-comparison.
	// (Arrays.fill replaces the original hand-rolled loop.)
	private Integer[] initHashFuncValueFile(int hashSize) {
		Integer[] hashFuncValueFile = new Integer[hashSize];
		Arrays.fill(hashFuncValueFile, Integer.MAX_VALUE);
		return hashFuncValueFile;
	}	
	
	// Compute the MinHash signature of a file: for each of the _hashSize hash
	// functions, the minimum hash value over the file's stem words. A slot
	// stays at Integer.MAX_VALUE if no known stem occurs in the file.
	// Fixes vs. original: removed the unused hashFuncValue HashMap local, and
	// stems absent from the training dictionary are now skipped instead of
	// causing a NullPointerException on unboxing (reachable from query() when
	// the target document contains unseen words).
	protected Integer[] file2Hash(File f){
		String fileContent = DocUtils.ReadFile(f);
		Map<Object, Double> wordCount = DocUtils.ConvertToFeatureMapFilter(fileContent);
		Integer[] hashFuncValueFile = initHashFuncValueFile(_hashSize);
		int modulus = _stemIdDict.size();	// loop-invariant: hash modulus is the dictionary size
		for (int i = 0; i < _hashSize; i++) {
			for (Entry<Object, Double> e : wordCount.entrySet()) {
				Integer stemId = _stemIdDict.get((String) e.getKey());
				if (stemId == null) {
					continue;	// word never seen at training time — no id, ignore
				}
				// Calculate hashValue using (a*x + b) mod c
				int hashValue = hashFunction(_hashFuncParameter[i][0], _hashFuncParameter[i][1], modulus, stemId);
				// Keep the minimum value seen for this hash function
				if (hashFuncValueFile[i] > hashValue) {
					hashFuncValueFile[i] = hashValue;
				}
			}				
		}
		return hashFuncValueFile;
	}
	
	// Encode a signature array as a sorted set of "X-Y" tokens, where X is the
	// hash-function index and Y its minimum hash value for this file.
	private TreeSet<String> hashFuncValueFileConverter(Integer[] hashFuncValue) {
		TreeSet<String> encoded = new TreeSet<String>();
		int idx = 0;
		for (Integer value : hashFuncValue) {
			encoded.add(idx + "-" + value);
			idx++;
		}
		return encoded;
	}
	
	
	// Entry point for scoring a bucket against a target signature: starts a
	// fresh branch-decision map, then scores from the bucket's root.
	private double reduceScoreMain(ZDDMeta zddMeta, Set<String> hashFuncValueFile) {
		HashMap<Integer, Boolean> branchChoices = new HashMap<Integer, Boolean>();
		_nodeMaxBranchDict = branchChoices;
		return reduceScore(zddMeta.zddRoot, hashFuncValueFile, zddMeta);
	}

	// Recursively score the best-matching combination in the ZDD against the
	// target signature: each taken variable whose token is in the target set
	// contributes +1, terminating at the base node adds _zddBaseCost, and a
	// dead end (empty terminal) costs _zddEmptyCost.
	// Side effect: records in _nodeMaxBranchDict whether the high (true) or
	// low (false) branch maximized the score at each internal node, so that
	// extractSet can replay the winning path.
	// NOTE(review): no memoization — shared DAG nodes are re-scored on every
	// visit, which can be exponential; confirm bucket ZDDs stay small.
	private double reduceScore(int zdd, Set<String> hashFuncValueTarget, ZDDMeta zddMeta) {
		// Terminal 0: empty family — penalize this path
		if (zdd == 0)
			return _zddEmptyCost;
		// Terminal 1: base node — a combination ends here
		if (zdd == 1)
			return _zddBaseCost;

		int zddLow = zddMeta.zdd.subset0(zdd, zddMeta.zdd.getVar(zdd));
		int zddHigh = zddMeta.zdd.subset1(zdd, zddMeta.zdd.getVar(zdd));
		
		// 1 if this node's token occurs in the target signature, else 0
		int zddContain = (hashFuncValueTarget.contains(zddMeta.stemVarInvDict.get(zddMeta.zdd.getVar(zdd)))) ? 1 : 0;

		// High branch takes the variable (earning zddContain); low branch skips it.
		double zddHighCost = reduceScore(zddHigh, hashFuncValueTarget, zddMeta) + zddContain;
		double zddLowCost = reduceScore(zddLow, hashFuncValueTarget, zddMeta);

		double zddCost = 0;
		boolean maxBranch;

		if (zddHighCost > zddLowCost) {
			zddCost = zddHighCost;
			maxBranch = true;
		} else {
			zddCost = zddLowCost;
			maxBranch = false;
		}
		// Remember which branch won so extractSet can reconstruct the set.
		_nodeMaxBranchDict.put(zdd, maxBranch);

		return zddCost;
	}
	
	// Replay the branch decisions recorded by reduceScore, collecting into m
	// the tokens along the maximizing path from zdd down to a terminal.
	// Must be called after reduceScoreMain on the same zddMeta; otherwise
	// _nodeMaxBranchDict.get(zdd) can be null and this throws a
	// NullPointerException on unboxing.
	public void extractSet(int zdd, Set<String> m, ZDDMeta zddMeta) {
		int zddMaxBranch;

		// Recurse until a terminal (0 or 1) is reached.
		if (!(zdd == 0 || zdd == 1)) {
			if (_nodeMaxBranchDict.get(zdd)) {
				// High branch won: this node's token is part of the extracted set.
				String keyWord = zddMeta.stemVarInvDict.get(zddMeta.zdd.getVar(zdd));
				m.add(keyWord);
				zddMaxBranch = zddMeta.zdd.subset1(zdd, zddMeta.zdd.getVar(zdd));
			} else {
				zddMaxBranch = zddMeta.zdd.subset0(zdd, zddMeta.zdd.getVar(zdd));
			}
			extractSet(zddMaxBranch, m, zddMeta);
		}
	}

	// Join the set elements into a single space-separated string.
	// Fix: uses StringBuilder instead of repeated String concatenation in a
	// loop (O(n) instead of O(n^2)); output is byte-identical to the original
	// (leading separator then trim).
	protected String set2String(Set<String> m) {
		StringBuilder result = new StringBuilder();
		for (String e : m) {
			result.append(' ').append(e);
		}
		return result.toString().trim();
	}
	
	// zdd.change(set, var) with reference-count maintenance: the result is
	// ref'ed and the input set is deref'ed, so chained calls (see topic2Zdd)
	// do not pin intermediate nodes in the ZDD table.
	protected int changeWith(int zddSet, int wordVar, ZDDMeta zddMeta) {
		int tmp = zddMeta.zdd.ref(zddMeta.zdd.change(zddSet, wordVar));
		zddMeta.zdd.deref(zddSet);
		return tmp;
	}
	
	// zdd.union with reference-count maintenance: refs the union and derefs
	// the first operand (the old accumulator).
	// NOTE(review): zddSetB is never deref'ed here, and the caller in build()
	// does not deref the topic node it passes in either — this looks like a
	// slow reference leak in the ZDD table; verify against jdd's ref-counting
	// contract before changing.
	protected int unionWith(int zddSetA, int zddSetB, ZDDMeta zddMeta) {
		int tmp = zddMeta.zdd.ref(zddMeta.zdd.union(zddSetA, zddSetB));
		zddMeta.zdd.deref(zddSetA);
		return tmp;
	}
	
	// Build the index: assign stem ids and hash parameters, then for every
	// training file (1) compute its MinHash signature, (2) for each LSH band
	// union the signature into that band-bucket's ZDD, and (3) index the
	// signature tokens in Lucene for later candidate retrieval.
	// Fix vs. original: the finally block now null-checks _fileIndexBuilderRaw,
	// which avoided a NullPointerException when the FileIndexBuilderRaw
	// constructor threw before the field was assigned.
	@Override
	public void build() {
		try {
			generateStemId();
			generateHashFuncPar();
			
			ArrayList<File> files = FileFinder.GetAllFiles( _trainDataDir, "", true);
			
			_fileIndexBuilderRaw = new FileIndexBuilderRaw(_indexPath);
			_luceneSearch = new SimpleSearchRankerKeywords(_indexPath, default_field, _fileIndexBuilderRaw._analyzer);
			int numFileProcessed = 0;
			
			for (File f : files) {
				Integer[] hashFuncValue = file2Hash(f);
				TreeSet<String> hashFuncValueFile = hashFuncValueFileConverter(hashFuncValue);
				for (int bandIdx = 0; bandIdx < _noOfBand; bandIdx++) {
					// Bucket key: sum of this band's signature rows.
					int sumHash = 0;
					for (int rowOffset = 0; rowOffset < _noOfRowPerBand; rowOffset++) {
						sumHash += hashFuncValue[bandIdx * _noOfRowPerBand + rowOffset]; 
					}
					
					// Fetch (or lazily create) the ZDD bucket for this band hash.
					int zddTmp;
					ZDDMeta zddMeta;
					if (_lshBucket.containsKey(sumHash)) {
						zddMeta = _lshBucket.get(sumHash);
						zddTmp = zddMeta.zddRoot;
					} else {
						zddMeta = new ZDDMeta();
						_lshBucket.put(sumHash, zddMeta);
						zddTmp = zddMeta.zdd.base();
					}
					// Union this file's signature combination into the bucket's family.
					int zddTopic = topic2Zdd(hashFuncValueFile, zddMeta);
					zddMeta.zddRoot = unionWith(zddTmp, zddTopic, zddMeta);
				}

				// Index the signature tokens in Lucene, keyed by the file path.
				_fileIndexBuilderRaw.addFile(f.getPath(), hashFuncValueFile);
					
				numFileProcessed++;
				if (numFileProcessed%1000 == 0) {
					System.out.println("File Processed: " + numFileProcessed);
				}
			}
		} catch (IOException e) {
			e.printStackTrace(System.err);
		} finally {
			// Guard: the constructor above can throw before the field is assigned.
			if (_fileIndexBuilderRaw != null) {
				_fileIndexBuilderRaw.close();
			}
		}
	}
		
	// Query the index for the training file closest to targetDataName.
	// NOTE(review): the candidate-retrieval (Lucene) and Hamming-distance
	// ranking steps are commented out below, so on the success path this
	// currently always returns null (closestFilename is never reassigned);
	// "" is returned only after an exception. Confirm whether this is
	// intentional scaffolding before relying on the return value.
	@Override
	public String query(String targetDataName) {
		File targetFile = new File(targetDataName);
		Integer[] hashFuncValue = file2Hash(targetFile);
		TreeSet<String> hashFuncValueFile = hashFuncValueFileConverter(hashFuncValue);
		HashSet<String> candidate = new HashSet<String>();
		
		try {
			for (int bandIdx = 0; bandIdx < _noOfBand; bandIdx++) {
				// Same band-hash computation as in build(): sum of the band's rows.
				int sumHash = 0;
				for (int rowOffset = 0; rowOffset < _noOfRowPerBand; rowOffset++) {
					sumHash += hashFuncValue[bandIdx * _noOfRowPerBand + rowOffset]; 
				}
				
				if (_lshBucket.containsKey(sumHash)) {
					ZDDMeta zddMeta = _lshBucket.get(sumHash);
					// Score the bucket and extract the best-matching stored signature.
					// NOTE(review): sim is computed but unused while the Lucene
					// lookup below stays commented out.
					double sim = reduceScoreMain(zddMeta, hashFuncValueFile);
					Set<String> setExtract = new TreeSet<String>();
					extractSet(zddMeta.zddRoot, setExtract, zddMeta);
//					candidate.addAll(_luceneSearch.doSearchSet(set2String(setExtract)));
				}
			}
//			System.out.println(candidate.size());
			// Calculate the minHash
			String closestFilename = null;
			int dist;
			int minDist = Integer.MAX_VALUE;
//			Map<Object, Double> TargetWordCount = DocUtils.ConvertToFeatureMapFilter(DocUtils.ReadFile(targetFile));
//			for (String s : candidate) {
//				File sourceFile = new File(s);
//				Map<Object, Double> SourceWordCount = DocUtils.ConvertToFeatureMapFilter(DocUtils.ReadFile(sourceFile));
//				dist = HammingDist.calc(TargetWordCount.keySet(), SourceWordCount.keySet());
//				if (dist < minDist) {
//					minDist = dist;
//					closestFilename = s;
//				}			
//			}
//			System.out.println(closestFilename );
			return closestFilename;
		} catch (Exception e) {
			// Broad catch: any failure degrades to returning the empty string.
			e.printStackTrace();
		}
		return "";
	}
	
	/** Size metric: total ZDD node count summed over every bucket's root. */
	@Override
	public int size() {
		int totNodeSize = 0;
		for (ZDDMeta meta : _lshBucket.values()) {
			totNodeSize += meta.zdd.nodeCount(meta.zddRoot);
		}
		return totNodeSize;
	}


	@Override
	public void clear() {
		_stemIdDict = null;
		_hashFuncParameter = null;
		_lshBucket = null;
		// ZDD
		_nodeMaxBranchDict = null;
		// Lucene
		_fileIndexBuilderRaw = null;
		_fileIndexBuilder = null;
		_luceneSearch = null;
	}
	/**
	 * Unused entry point — presumably this class is driven through the Core
	 * lifecycle (init/build/query) by an external harness; verify against the
	 * caller before adding behavior here.
	 * @param args ignored
	 */
	public static void main(String[] args) {
		// TODO Auto-generated method stub
	}
}
