/** NN-ZDD Multihash
 * 
 * 
 * @author Yong Boon, Lim (yongboon.lim@gmail.com)
 */

package core;

import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.Map.Entry;

import jdd.zdd.ZDD;

import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;

import search.FileFinder;
import util.DocUtils;
import util.HammingDist;
import core.NNMultiHash.MultiHashNode;

public class NNZDDMultiHash extends NN {
	
	// MultiHash table node: one level of the multi-level hash tree.
	// Each node maps a child's hash-value set to the child node and, for
	// non-leaf children, keeps a ZDD holding all child hash sets so the
	// nearest child can be found by a max-score traversal of the ZDD.
	class MultiHashNode {
		// Hash-value set that identifies this node (its key in the parent).
		HashSet<String> _nodeId;
		// Root Node (0), Intermediate Node (1), and leaf Node (2)
		Integer _nodeType;
		// Depth in the tree (root = 0).
		Integer _level;
		// Children keyed by their hash-value set.
		HashMap<HashSet<String>, MultiHashNode> _childList; 
		
		// ZDD state: lazily created in putZDD(); leaf nodes never allocate one.
		ZDD _zdd = null;
		// Root node id of the ZDD, updated on every insertion.
		int _zddRoot;
		// Stem-word hash -> ZDD variable id, plus the inverse view of the same map.
		BiMap<String, Integer> _stemVarDict;
		BiMap<Integer, String> _stemVarInvDict;
		// ZDD node id -> true when the 1-branch scored highest at that node;
		// written by reduceScore(), replayed by extractSet().
		HashMap<Integer, Boolean> _nodeMaxBranchDict;
		// Score contributed by reaching the base terminal (node id 1).
		final int _zddBaseCost = 1;
		// Penalty for reaching the empty terminal (node id 0).
		final int _zddEmptyCost = -2; 

		// Constructor; defensively copies nodeId.
		MultiHashNode(HashSet<String> nodeId, Integer level, Integer nodeType) {
			_nodeId = new HashSet<String>(nodeId);
			_level = level;
			_nodeType = nodeType;
			_childList = new HashMap<HashSet<String>, MultiHashNode>();
		}
		
		// Toggle wordVar in every combination of zddSet; the reference on the
		// old set is released and the new set is returned referenced.
		int changeWith(int zddSet, int wordVar) {
			int tmp = _zdd.ref(_zdd.change(zddSet, wordVar));
			_zdd.deref(zddSet);
			return tmp;
		}

		// Union of two ZDD sets; releases the reference on zddSetA only.
		// NOTE(review): zddSetB is never deref'd here and the caller (putZDD)
		// does not deref it either -- confirm the extra reference is intended.
		int unionWith(int zddSetA, int zddSetB) {
			int tmp = _zdd.ref(_zdd.union(zddSetA, zddSetB));
			_zdd.deref(zddSetA);
			return tmp;
		}
		
		// Convert Hash Code to ZDD: build the single combination containing
		// one ZDD variable per string in childId, creating fresh variables
		// for strings not seen before.
		int hash2Zdd(HashSet<String> childId) {
			int zddTmp = _zdd.base();
			for (String s : childId) {
				// If the stem word have not been created as variable
				if (!_stemVarDict.containsKey(s)) {
					int zddVar = _zdd.createVar();
					_stemVarDict.put((String) s, zddVar);
				}
				zddTmp = changeWith(zddTmp, _stemVarDict.get(s));
			}
			return zddTmp;
		}
		
		// Put hashcode ZDD into Big ZDD set
		void putZDD(HashSet<String> childId) {
			if (_zdd == null) {
				// Lazy initialisation of the ZDD and its dictionaries on the
				// first insertion.
				_zdd = new ZDD(1000, 1000);
				_zddRoot = _zdd.base();
				_stemVarDict = HashBiMap.create();
				_stemVarInvDict = _stemVarDict.inverse();
				_nodeMaxBranchDict = new HashMap<Integer, Boolean>();
			}
			
			
			int zddHash;
			zddHash = hash2Zdd(childId);
			_zddRoot = unionWith(_zddRoot, zddHash);
		}
		
		// Entry point for scoring: resets the per-traversal branch map before
		// the recursive walk.
		protected double reduceScoreMain(int zdd, HashSet<String> childId) {
			_nodeMaxBranchDict = new HashMap<Integer, Boolean>();
			return reduceScore(zdd, childId);
		}

		// Recursively scores the sets stored in the ZDD against childId
		// (+1 each time a taken variable is also in childId) and records,
		// per ZDD node, which branch achieved the maximum score.
		public double reduceScore(int zdd, HashSet<String> childId) {
			// Empty Set
			if (zdd == 0)
				return _zddEmptyCost;
			// Base Set
			if (zdd == 1)
				return _zddBaseCost;

			int zddLow = _zdd.subset0(zdd, _zdd.getVar(zdd));
			int zddHigh = _zdd.subset1(zdd, _zdd.getVar(zdd));
			
			// Calculate hamming distance
			int zddContain = (childId.contains(_stemVarInvDict.get(_zdd.getVar(zdd)))) ? 1 : 0;
			double zddHighCost = reduceScore(zddHigh, childId) + zddContain;
			double zddLowCost = reduceScore(zddLow, childId);

			double zddCost = 0;
			boolean maxBranch;

			// Keep whichever branch scores higher; ties go to the 0-branch.
			if (zddHighCost > zddLowCost) {
				zddCost = zddHighCost;
				maxBranch = true;
			} else {
				zddCost = zddLowCost;
				maxBranch = false;
			}
			_nodeMaxBranchDict.put(zdd, maxBranch);
			return zddCost;
		}
		
		// Replays the max-branch decisions recorded by reduceScore(),
		// collecting the variable names along the winning path into m.
		public void extractSet(int zdd, HashSet<String> m) {
			int zddMaxBranch;

			if (!(zdd == 0 || zdd == 1)) {
				if (_nodeMaxBranchDict.get(zdd)) {
					String keyWord = _stemVarInvDict.get(_zdd.getVar(zdd));
					m.add(keyWord);
					zddMaxBranch = _zdd.subset1(zdd, _zdd.getVar(zdd));
				} else {
					zddMaxBranch = _zdd.subset0(zdd, _zdd.getVar(zdd));
				}
				extractSet(zddMaxBranch, m);
			}
		}

		// True when a child keyed by exactly this hash-value set exists.
		boolean contains(HashSet<String> childId) {
			return _childList.containsKey(childId); 
		}
		
		// Creates the child for childId if absent; non-leaf children are also
		// inserted into this node's ZDD so nn() can find them.
		void put(HashSet<String> childId) {
			if (!_childList.containsKey(childId)) {
				Integer childNodeType;
				if (_level + 1 == _hashLevel) {
					childNodeType = 2;
				} else {
					childNodeType = 1;
					
				}
				
				// Leaf children (file paths, see build()) are not ZDD-indexed.
				if (childNodeType != 2) {
					putZDD(childId);
				}
				_childList.put(new HashSet<String>(childId), new MultiHashNode(childId, _level + 1, childNodeType));
				
			}
		}	
		
		// TODO : Error handling for no exist
		MultiHashNode get(HashSet<String> childId) {
			return _childList.get(childId); 
		}
		
		// Find the nearest neighbour node base on the hashcode: score the ZDD
		// against targetSet, extract the best-matching stored hash set, and
		// return the corresponding child node.
		MultiHashNode nn(HashSet<String> targetSet) { 
			// Called for its side effect of filling _nodeMaxBranchDict; the
			// returned score itself is unused.
			double sim = reduceScoreMain(_zddRoot, targetSet);
			HashSet<String> setExtract = new HashSet<String>();
			extractSet(_zddRoot, setExtract);
			MultiHashNode minMultiHashNode = _childList.get(setExtract);
			
			return minMultiHashNode;
		}
		
		// Linear scan over the file names stored as child keys, returning the
		// file whose word set is closest to targetSet under HammingDist.calcG.
		String nnFile(Set<Object> targetSet) {
			String minFileName = "";
			int minDist = Integer.MAX_VALUE;
			for (Entry<HashSet<String>, MultiHashNode> e : _childList.entrySet()) {				
				for (String s : e.getKey()) {
					File sourceFile = new File(s);
					Map<Object, Double> sourceWordCount = DocUtils.ConvertToFeatureMapFilter(DocUtils.ReadFile(sourceFile));
					int dist = HammingDist.calcG(sourceWordCount.keySet(), targetSet);
					
					if (minDist > dist) {
						minDist = dist;
						minFileName = s;
					}
				}						
			}	
			return minFileName;
		}


	}
	
	// Number of levels in the multi-hash tree (last level stores file paths).
	int _hashLevel;
	// Seed and generator; currently only initialised, never drawn from here.
	int _randomSeed = 0;
	Random _randomGen = new Random(_randomSeed);
	// Root node of the multi-hash tree, created by init().
	MultiHashNode _multiHashNode;
	// Hash-value set identifying the root node.
	HashSet<String> _rootHashSet;

	/**
	 * Creates an index over the given training directory.
	 *
	 * @param trainDataDir directory containing the training documents
	 * @param hashLevel    number of levels in the multi-hash tree
	 */
	public NNZDDMultiHash(String trainDataDir, int hashLevel) {
		super(trainDataDir);
		_hashLevel = hashLevel;
		_nnDescShort = "NN-ZDD-MultiHash-" + hashLevel;
		_nnDescLong = "Nearest Neighbor with ZDD MultiHash";
	}
	
	/**
	 * Resets the index: creates the root hash set and the root node of the
	 * multi-hash tree. Must be called before {@link #build()}.
	 */
	@Override
	public void init() {
		super.init();
		// Bug fix: the original declared a local "HashSet<String> _rootHashSet"
		// that shadowed the field, so the field stayed null forever. Assign
		// the field instead.
		_rootHashSet = new HashSet<String>();
		_rootHashSet.add("-1");
		_multiHashNode = new MultiHashNode(_rootHashSet, 0, 0);
	}
	
	/**
	 * Hashes a string and keeps only its low-order {@code bits} bits
	 * (all 32 bits of the hash when {@code bits >= 32}).
	 *
	 * @param corpus string to hash
	 * @param bits   number of low-order bits to keep
	 * @return the masked hash value
	 */
	protected static int hashKey(String corpus, int bits) {
		// ~0 is an all-ones mask, identical to the 0xFFFFFFFF literal.
		int mask = (bits < 32) ? ((1 << bits) - 1) : ~0;
		return corpus.hashCode() & mask;
	}
	
	/**
	 * Hash function for a stem word at a given hash level.
	 * The bit budget is widened by 5 -- presumably so that even level 0
	 * keeps a non-trivial number of hash bits (TODO confirm intent).
	 */
	private String hashWord(String stemWord, int hashLength) {
		int masked = hashKey(stemWord, hashLength + 5);
		return Integer.toString(masked);
	}

	
	/**
	 * Builds the multi-hash tree: every training file is hashed once per
	 * level and inserted level by level, creating missing nodes on the way.
	 * Levels 0 .. _hashLevel-2 hold hashed stem words; the final level holds
	 * the file path itself.
	 */
	@Override
	public void build() {
		ArrayList<File> files = FileFinder.GetAllFiles(_trainDataDir, "", true);
		for (File f : files) {
			Map<Object, Double> wordCount =
					DocUtils.ConvertToFeatureMapFilter(DocUtils.ReadFile(f));

			// Step 1: compute the document's hash-code set for every level.
			HashMap<Integer, HashSet<String>> levelHashes =
					new HashMap<Integer, HashSet<String>>();
			for (int level = 0; level < _hashLevel; level++) {
				HashSet<String> codes = new HashSet<String>();
				if (level == _hashLevel - 1) {
					// Last layer stores the filename itself.
					codes.add(f.getPath());
				} else {
					for (Entry<Object, Double> e : wordCount.entrySet()) {
						codes.add(hashWord((String) e.getKey(), level));
					}
				}
				levelHashes.put(level, codes);
			}

			// Step 2: descend the tree, creating any missing child per level.
			MultiHashNode node = _multiHashNode;
			for (int level = 0; level < _hashLevel; level++) {
				HashSet<String> childId = levelHashes.get(level);
				node.put(childId); // no-op when the child already exists
				node = node.get(childId);
			}
		}
	}
				
	/**
	 * Finds the nearest training file for the given document: hashes the
	 * target once per non-leaf level, walks the tree picking the nearest
	 * child at each level, then does an exhaustive scan at the final node.
	 *
	 * @param targetDataName path of the query document
	 * @return path of the closest training file
	 */
	public String query(String targetDataName) {
		Map<Object, Double> targetWordCount = DocUtils.ConvertToFeatureMapFilter(
				DocUtils.ReadFile(new File(targetDataName)));

		// Pre-compute the target's hash-code set for every non-leaf level.
		HashMap<Integer, HashSet<String>> targetHashes =
				new HashMap<Integer, HashSet<String>>();
		for (int level = 0; level < _hashLevel - 1; level++) {
			HashSet<String> codes = new HashSet<String>();
			for (Entry<Object, Double> e : targetWordCount.entrySet()) {
				codes.add(hashWord((String) e.getKey(), level));
			}
			targetHashes.put(level, codes);
		}

		// Descend the tree via the nearest child at each level.
		MultiHashNode node = _multiHashNode;
		for (int level = 0; level < _hashLevel - 1; level++) {
			node = node.nn(targetHashes.get(level));
		}

		// Exhaustively compare against the files stored under the final node.
		return node.nnFile(targetWordCount.keySet());
	}
	


	// TODO: stub -- always reports an empty index regardless of contents.
	public int size() {
		return 0;
	}
	
	/** Drops all index state so it can be garbage collected. */
	public void clear() {
		_rootHashSet = null;
		_multiHashNode = null;
		_randomGen = null;
	}

	/**
	 * Demo driver: indexes a test directory and queries it with one of its
	 * own files, printing the nearest neighbour found.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		String trainDataDir = "data/test_data2";
		String targetFile = trainDataDir + "/57110";
		NNZDDMultiHash nn = new NNZDDMultiHash(trainDataDir, 4);
		nn.init();
		nn.build();
		// Fix: the query result used to be computed and silently discarded.
		System.out.println("Nearest neighbour: " + nn.query(targetFile));
	}
}
