/** Naive NN-ZDD with TF-IDF
 * 
 * 
 * @author Yong Boon, Lim (yongboon.lim@gmail.com)
 */
package core;

import java.io.File;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;

import measure.DocumentFreq;

import util.DocUtils;

/**
 * Nearest-neighbour classifier over a ZDD whose variables encode
 * (stem word, capped term-frequency) pairs; candidate subsets are scored
 * with TF-IDF weights accumulated in {@link DocumentFreq}.
 */
public class NNZDDTfIdf extends NNZDDBasic {
	// Document-frequency table used to derive IDF weights at scoring time.
	DocumentFreq _dF;

	// Upper cap applied to each term's raw frequency (set by constructor).
	int _noOfTF = 0;
	// Historical regex form of the delimiter; kept for backward compatibility
	// with any subclass that reads it. Parsing now uses _delimiter directly.
	String _splitter = "\\.";
	// Literal character that joins a stem word and its capped TF in a node label.
	String _delimiter = ".";

	/**
	 * @param trainDataDir directory holding the training documents
	 * @param noOfTF       maximum term frequency recorded per stem word
	 */
	public NNZDDTfIdf(String trainDataDir, int noOfTF) {
		super(trainDataDir);
		_noOfTF = noOfTF;
		_nnDescShort = "NN-ZDD-TFIDF-LUC-" + Integer.toString(_noOfTF);
		_nnDescLong = "Nearest Neighbor with ZDD TF-IDF (Using Lucene to retrieve doc)";
	}

	/** Convenience constructor using the default TF cap of 3. */
	public NNZDDTfIdf(String trainDataDir) {
		this(trainDataDir, 3);
	}

	/** Initializes the base classifier and a fresh document-frequency table. */
	@Override
	public void init() {
		super.init();
		_dF = new DocumentFreq();
	}

	/**
	 * Splits a node label of the form {@code word + _delimiter + tf} back into
	 * its two parts. Splitting at the LAST delimiter makes this the exact
	 * inverse of how topic2Zdd builds the label, even if the stem word itself
	 * contains the delimiter character (the old StringTokenizer approach broke
	 * on such words because it treated the delimiter string as a character set).
	 *
	 * @return a two-element array: {stem word, term-frequency string}
	 */
	private String[] splitNodeString(String nodeString) {
		int pos = nodeString.lastIndexOf(_delimiter);
		return new String[] { nodeString.substring(0, pos), nodeString.substring(pos + 1) };
	}

	/**
	 * Converts one topic document into a ZDD: each distinct (word, capped TF)
	 * pair becomes a ZDD variable, and the document's set of pairs is folded
	 * into the returned ZDD node.
	 *
	 * @param file training document to encode
	 * @return root of the ZDD representing this document
	 */
	@Override
	public int topic2Zdd(File file) {
		String fileContent = DocUtils.ReadFile(file);
		Map<Object, Double> wordCount = DocUtils.ConvertToFeatureMapFilter(fileContent);
		int zddTopic = _zdd.base();

		for (Map.Entry<Object, Double> me : wordCount.entrySet()) {
			String word = (String) me.getKey();
			// Cap the raw term frequency at _noOfTF.
			int tf = Math.min(me.getValue().intValue(), _noOfTF);
			// Node label combines the stem word and its capped TF.
			String nodeString = word + _delimiter + tf;
			// Lazily allocate a ZDD variable for a (word, tf) pair not seen before.
			if (!_stemVarDict.containsKey(nodeString)) {
				_stemVarDict.put(nodeString, _zdd.createVar());
			}
			// Update the document frequency of the stem word.
			_dF.put(word);
			zddTopic = changeWith(zddTopic, _stemVarDict.get(nodeString));
		}

		return zddTopic;
	}

	/**
	 * Recursively scores the ZDD against the target document's word map,
	 * recording in _nodeMaxBranchDict which branch (high/low) wins at each
	 * node so extractSet can replay the best path afterwards.
	 *
	 * @param zdd             current ZDD node (0 = empty set, 1 = base set)
	 * @param targetWordCount feature map of the query document
	 * @return best achievable score from this node downward
	 */
	@Override
	public double reduceScore(int zdd, Map<Object, Double> targetWordCount) {
		// Terminal nodes carry fixed costs.
		if (zdd == 0)
			return _zddEmptyCost;
		if (zdd == 1)
			return _zddBaseCost;

		int var = _zdd.getVar(zdd);
		int zddLow = _zdd.subset0(zdd, var);
		int zddHigh = _zdd.subset1(zdd, var);

		// Recover the stem word and capped TF encoded in this node's label.
		String[] parts = splitNodeString(_stemVarInvDict.get(var));
		String word = parts[0];
		double tf = Double.parseDouble(parts[1]);
		// TF-IDF contribution when the query contains the word, plus a constant
		// bonus of 10 * _noOfTF so any match dominates IDF magnitude differences.
		double zddContain = targetWordCount.containsKey(word)
				? tf * _dF.getIDF(word) + 10 * _noOfTF
				: 0;

		double zddHighCost = reduceScore(zddHigh, targetWordCount) + zddContain;
		double zddLowCost = reduceScore(zddLow, targetWordCount);

		// Remember the winning branch for extractSet.
		boolean maxBranch = zddHighCost > zddLowCost;
		_nodeMaxBranchDict.put(zdd, maxBranch);
		return maxBranch ? zddHighCost : zddLowCost;
	}

	/**
	 * Walks the best-scoring path recorded by reduceScore, collecting the stem
	 * word of every node whose high (include) branch won.
	 *
	 * @param zdd starting ZDD node
	 * @param m   output set receiving the extracted stem words
	 */
	@Override
	public void extractSet(int zdd, Set<String> m) {
		// Terminal nodes (empty / base) end the walk.
		if (zdd == 0 || zdd == 1) {
			return;
		}
		int var = _zdd.getVar(zdd);
		int zddMaxBranch;
		if (_nodeMaxBranchDict.get(zdd)) {
			// High branch won: this node's word belongs to the best set.
			m.add(splitNodeString(_stemVarInvDict.get(var))[0]);
			zddMaxBranch = _zdd.subset1(zdd, var);
		} else {
			zddMaxBranch = _zdd.subset0(zdd, var);
		}
		extractSet(zddMaxBranch, m);
	}

	/** Releases base-class state and drops the document-frequency table. */
	@Override
	public void clear() {
		super.clear();
		_dF = null;
	}

	/**
	 * Smoke-test entry point: builds the classifier over one newsgroup
	 * directory and queries it with one of its own documents.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		String trainDataDir = "data/more_newsgroups/alt.atheism";
		String targetFile = trainDataDir + "/53141";
		NNZDDTfIdf nnZDD = new NNZDDTfIdf(trainDataDir);
		nnZDD.init();
		nnZDD.build();
		nnZDD.query(targetFile);
	}

}
