/** Naive NN-ZDD with Minhash
 * 
 * 
 * @author Yong Boon, Lim (yongboon.lim@gmail.com)
 */
package core;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
import java.util.Random;
import java.util.TreeSet;

import jdd.zdd.ZDD;

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

import search.FileIndexBuilderRaw;
import search.SimpleSearchRanker;
import util.DocUtils;
import util.FileFinder;

/**
 * Nearest-neighbor search where each document is reduced to a MinHash
 * signature, the signatures are stored in a ZDD, and Lucene is used to map the
 * best signature back to a document.
 *
 * Usage: construct, then {@link #init()}, {@link #build()}, {@link #query(String)}.
 */
public class NNZDDMinHash extends NNZDDBasic {
	// MinHash Variables
	HashMap<String, Integer> _stemIdDict;		// Stem word -> unique id, assigned by generateStemId()
	int _hashSize;								// Number of MinHash functions (signature length)
	int _randomSeed = 123;						// Fixed seed so hash parameters are reproducible across runs
	Random _randomGen = new Random(_randomSeed);
	int[][] _hashFuncParameter;					// [i][0] = multiplier a, [i][1] = addend b of hash function i
	// ZDD
	// Lucene
	FileIndexBuilderRaw _fileIndexBuilderRaw;
	
	/**
	 * @param trainDataDir directory containing the training documents
	 * @param hashSize     number of MinHash functions (signature length)
	 */
	public NNZDDMinHash(String trainDataDir, int hashSize) {
		super(trainDataDir);
		_nnDescShort = "NN-ZDD-MINHASH-LUC-" + Integer.toString(hashSize);
		_nnDescLong = "Nearest Neighbor with ZDD MinHash (Using Lucene to retrieve doc)";
		_hashSize = hashSize;
	}	
	
	/** Resets superclass state and the stem-id dictionary. */
	@Override
	public void init() {
		super.init();
		_stemIdDict = new HashMap<String, Integer>();
	}
	
	// Scan all the training docs' stem words and assign each a unique
	// consecutive id (0, 1, 2, ...), stored in _stemIdDict.
	protected void generateStemId() {
		ArrayList<File> files = FileFinder.GetAllFiles(_trainDataDir, "", true);
		HashMap<String, Integer> stemIdDict = new HashMap<String, Integer>();
		int stemId = 0;

		for (File f : files) {
			String fileContent = DocUtils.ReadFile(f);
			Map<Object, Double> wordCount = DocUtils.ConvertToFeatureMapFilter(fileContent);
			
			for (Map.Entry<Object, Double> me : wordCount.entrySet()) {
				String stem = (String) me.getKey();
				// First occurrence wins; later duplicates keep the original id.
				if (!stemIdDict.containsKey(stem)) {
					stemIdDict.put(stem, stemId);
					stemId++;
				}
			}
		}
		
		_stemIdDict = stemIdDict;
	}
	
	// Generate a and b for the hash family h_i(x) = (a*x + b) mod m.
	// NOTE: the modulus m actually applied in file2Hash() is _stemIdDict.size()
	// (the vocabulary size), not _hashSize.
	public void generateHashFuncPar() {
		_hashFuncParameter = new int[_hashSize][2];
		for (int i = 0; i < _hashSize; i++) {
			_hashFuncParameter[i][0] = _randomGen.nextInt(3 * _hashSize) + 1;	// multiplier a in [1, 3 * _hashSize]
			_hashFuncParameter[i][1] = _randomGen.nextInt(10) + 1;				// addend b in [1, 10]
		}
	}

	/**
	 * Converts one document's MinHash signature (set of "i-v" strings) into a
	 * single-path ZDD, creating ZDD variables for unseen signature elements.
	 *
	 * @param hashFuncValue signature produced by {@link #file2Hash(File)}
	 * @return ZDD node id representing the signature as a set
	 */
	public int topic2Zdd(Set<String> hashFuncValue) {
		int zddTopic = _zdd.base();
		for (String s : hashFuncValue) {
			if (!_stemVarDict.containsKey(s)) {
				int zddVar = _zdd.createVar();
				_stemVarDict.put(s, zddVar);
			}
			zddTopic = changeWith(zddTopic, _stemVarDict.get(s));
		}
		return zddTopic;
	}

	/**
	 * Intentionally a no-op in this subclass: the union is built incrementally
	 * inside {@link #build()} instead.
	 *
	 * @return always 0
	 */
	protected int zddTopicUnion() {
		// Do Nothing
		return 0;
	}
	
	/**
	 * Universal hash function h(x) = (a*x + b) mod c.
	 * All inputs are non-negative here, so the result is non-negative.
	 */
	protected int hashFunction(int a, int b, int c, int x) {
		return (a * x + b) % c;
	}
	

	/**
	 * Computes the MinHash signature of a file.
	 * Each signature element has the format "i-v" where i is the hash function
	 * index and v is the minimum hash value over the file's known stem words.
	 *
	 * @param f document to hash
	 * @return sorted set of "i-v" signature strings (empty if no word of the
	 *         file appears in the training dictionary)
	 */
	protected Set<String> file2Hash(File f) {
		String fileContent = DocUtils.ReadFile(f);
		Map<Object, Double> wordCount = DocUtils.ConvertToFeatureMapFilter(fileContent);
		HashMap<Integer, Integer> minHashValue = new HashMap<Integer, Integer>();
		TreeSet<String> signature = new TreeSet<String>();

		for (Entry<Object, Double> e : wordCount.entrySet()) {
			Integer stemId = _stemIdDict.get((String) e.getKey());
			// BUGFIX: a word absent from the training dictionary (common when
			// hashing a query document) used to auto-unbox null and throw a
			// NullPointerException; such words are now skipped.
			if (stemId == null) {
				continue;
			}
			for (int i = 0; i < _hashSize; i++) {
				// Calculate hashValue using (a*x + b) mod |vocabulary|
				int hashValue = hashFunction(_hashFuncParameter[i][0], _hashFuncParameter[i][1], _stemIdDict.size(), stemId);
				// Keep the minimum value seen per hash function (MinHash).
				Integer current = minHashValue.get(i);
				if (current == null || current > hashValue) {
					minHashValue.put(i, hashValue);
				}
			}
		}

		for (Entry<Integer, Integer> e : minHashValue.entrySet()) {
			signature.add(Integer.toString(e.getKey()) + "-" + Integer.toString(e.getValue()));
		}
		return signature;
	}

	/**
	 * Hashes the target file and scores it against the signature ZDD, recording
	 * the max-branch choice per node in _nodeMaxBranchDict for later extraction.
	 */
	@Override
	protected double reduceScoreMain(int zdd, String targetFilename) {
		File targetFile = new File(targetFilename);
		Set<String> hashFuncValue = file2Hash(targetFile);
		_nodeMaxBranchDict = new HashMap<Integer, Boolean>();
		return reduceScore(zdd, hashFuncValue);
	}

	/**
	 * Recursively scores a ZDD against a target signature: the high branch of a
	 * node earns +1 when the node's signature element is present in the target.
	 * Side effect: records in _nodeMaxBranchDict whether the high (true) or low
	 * (false) branch achieved the maximum at each visited node.
	 *
	 * NOTE(review): nodes shared between paths are re-visited (no memoization),
	 * so the cost can grow with the number of paths, not nodes.
	 *
	 * @param zdd                 ZDD node id (0 = empty set, 1 = base)
	 * @param hashFuncValueTarget target document's MinHash signature
	 * @return best achievable overlap score from this node down
	 */
	public double reduceScore(int zdd, Set<String> hashFuncValueTarget) {
		// Empty Set
		if (zdd == 0)
			return _zddEmptyCost;
		// Base Set
		if (zdd == 1)
			return _zddBaseCost;

		int zddLow = _zdd.subset0(zdd, _zdd.getVar(zdd));
		int zddHigh = _zdd.subset1(zdd, _zdd.getVar(zdd));
		
		// +1 on the high branch when this node's signature element matches.
		int zddContain = (hashFuncValueTarget.contains(_stemVarInvDict.get(_zdd.getVar(zdd)))) ? 1 : 0;

		double zddHighCost = reduceScore(zddHigh, hashFuncValueTarget) + zddContain;
		double zddLowCost = reduceScore(zddLow, hashFuncValueTarget);

		double zddCost = 0;
		boolean maxBranch;

		// Ties go to the low branch (strict > below), matching extraction order.
		if (zddHighCost > zddLowCost) {
			zddCost = zddHighCost;
			maxBranch = true;
		} else {
			zddCost = zddLowCost;
			maxBranch = false;
		}
		_nodeMaxBranchDict.put(zdd, maxBranch);

		return zddCost;
	}
	
	/**
	 * Builds the model: assigns stem ids, draws the hash family, then for every
	 * training file computes its MinHash signature, unions it into the ZDD and
	 * adds it to the Lucene index. Signatures are logged to log/output.txt.
	 */
	@Override
	public void build() {
		PrintStream ps = null;
		try {
			generateStemId();
			generateHashFuncPar();
			
			ps = new PrintStream(new FileOutputStream("log/output.txt"));
			
			ArrayList<File> files = FileFinder.GetAllFiles(_trainDataDir, "", true);
			// ZDD Variable
			int zddTopic;
			int zddTmp = _zdd.base();
			
			_fileIndexBuilderRaw = new FileIndexBuilderRaw(_indexPath);
			_luceneSearch = new SimpleSearchRanker(_indexPath, default_field, _fileIndexBuilderRaw._analyzer);
			int numFileProcessed = 0;
			
			for (File f : files) {
				Set<String> hashFuncValue = file2Hash(f);
				ps.println(f.getPath() + " " + set2String(hashFuncValue));

				// ZDD Union
				zddTopic = topic2Zdd(hashFuncValue);
				zddTmp = unionWith(zddTmp, zddTopic);
				
				// Add to Lucene
				_fileIndexBuilderRaw.addFile(f.getPath(), hashFuncValue);
					
				numFileProcessed++;
				if (numFileProcessed % 1000 == 0) {
					System.out.println("File Processed: " + numFileProcessed);
				}
			}
			_zddRoot = zddTmp;
		} catch (IOException e) {
			e.printStackTrace(System.err);
		} finally {
			// BUGFIX: the log stream was leaked when an exception interrupted
			// the build; close it here instead of only on the success path.
			if (ps != null) {
				ps.close();
			}
			// BUGFIX: guard against an NPE when the index builder was never
			// created because an earlier statement threw.
			if (_fileIndexBuilderRaw != null) {
				_fileIndexBuilderRaw.close();
			}
		}
	}
		
	/**
	 * Scores the target file against the ZDD, extracts the max-branch signature
	 * and hands it to Lucene to retrieve the nearest training document.
	 *
	 * @param targetDataName path of the query document
	 * @return Lucene search result string, or "" on search failure
	 */
	@Override
	public String query(String targetDataName) {
		double sim = reduceScoreMain(_zddRoot, targetDataName);
		Set<String> setExtract = new HashSet<String>();
		extractSet(_zddRoot, setExtract);
		
		if (DEBUG) {
			System.out.println(set2String(setExtract));
		}
		
		try {
			return _luceneSearch.doSearch(set2String(setExtract));
		} catch (Exception e) {
			e.printStackTrace();
		}
		return "";
	}

	/** Releases all MinHash/Lucene state so the instance can be rebuilt. */
	@Override
	public void clear() {
		super.clear();
		_stemIdDict = null;
		_randomGen = null;
		_hashFuncParameter = null;
		// ZDD
		// Lucene
		_fileIndexBuilderRaw = null;
	}

	/**
	 * Demo driver: builds the model over one newsgroup directory and queries a
	 * single target file with a 20-function MinHash signature.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		String trainDataDir = "data/more_newsgroups/alt.atheism";
		String targetFile = "data/53519";
		NNZDDMinHash nnZDD = new NNZDDMinHash(trainDataDir, 20);
		nnZDD.init();
		nnZDD.build();
		System.out.println(nnZDD.query(targetFile));
	}

}
