/**
 * 
 */
package edu.umd.clip.lm.tools;

import java.io.*;
import java.nio.channels.FileChannel;
import java.util.*;

import edu.berkeley.nlp.util.*;

import edu.umd.clip.jobs.*;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.data.Context;
import edu.umd.clip.lm.model.data.ContextFuturesPair;
import edu.umd.clip.lm.model.data.OnDiskTrainingDataReader;
import edu.umd.clip.lm.model.data.ReadableTrainingData;
import edu.umd.clip.lm.model.data.TrainingDataBlock;
import edu.umd.clip.lm.model.data.TrainingDataReader;
import edu.umd.clip.lm.model.data.TupleCountPair;
import edu.umd.clip.lm.questions.*;
import edu.umd.clip.lm.util.*;
import edu.umd.clip.lm.util.tree.*;
/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
/**
 * Computes the overlap between the leaf-cluster distributions induced by two
 * training-data files and prints two histograms: what fraction of each leaf
 * cluster's word types (and of its events, i.e. summed counts) seen in file1
 * are also present in file2.
 */
public class ComputeLeafOverlap {

	/** Command-line options (parsed by {@link OptionParser}). */
	public static class Options {
        @Option(name = "-config", required = true, usage = "XML config file")
		public String config;
        @Option(name = "-file1", required = true, usage = "Training data file 1")
		public String file1;
        @Option(name = "-file2", required = true, usage = "Training data file 2")
		public String file2;
        @Option(name = "-forest", required = false, usage = "LM ID to train (default: " + LanguageModel.PRIMARY_LM_ID + ")")
		public String lm = LanguageModel.PRIMARY_LM_ID;   
        @Option(name = "-jobs", usage = "number of concurrent jobs (default: 1)")
        public int jobs = 1;
	}

	/**
	 * Entry point: counts both files (concurrently via the job manager),
	 * then prints the per-bucket word and event overlap histograms.
	 *
	 * @param args command line, parsed into {@link Options}
	 */
	public static void main(String[] args) {
        OptionParser optParser = new OptionParser(Options.class);
        final Options opts = (Options) optParser.parse(args, true);

		Experiment.initialize(opts.config);
		Experiment experiment = Experiment.getInstance();
		experiment.buildPrefixes();
		
		JobManager.initialize(opts.jobs);
		Thread thread = new Thread(JobManager.getInstance(), "Job Manager");
		thread.setDaemon(true);
		thread.start();

		final LanguageModel lm = experiment.getLM(opts.lm);
		
		final Map<Integer,Long2IntMap> countsMap1 = new HashMap<Integer,Long2IntMap>(lm.getMaxClusters());
		final Map<Integer,Long2IntMap> countsMap2 = new HashMap<Integer,Long2IntMap>(lm.getMaxClusters());
		
		// Each job writes only its own map, so the group join below is the
		// only synchronization needed before the maps are read.
		JobGroup group = JobManager.getInstance().createJobGroup("balh");
		JobManager.getInstance().addJob(group, new Job(countingJob(lm, opts.file1, countsMap1), "1"));
		JobManager.getInstance().addJob(group, new Job(countingJob(lm, opts.file2, countsMap2), "2"));
		group.join();
		
		final int nrBuckets = 20;
		int wordOverLap[] = new int[nrBuckets];
		int eventOverLap[] = new int[nrBuckets];
		
		for(Map.Entry<Integer, Long2IntMap> entry : countsMap1.entrySet()) {
			int wordsMatched = 0;
			// long: summed event counts can exceed int range, and the
			// bucket computation multiplies by nrBuckets before dividing.
			long eventsMatched = 0;
			
			Long2IntMap map1 = entry.getValue();
			Long2IntMap map2 = countsMap2.get(entry.getKey());
			if (map2 != null) {
				for(Long2IntMap.Iterator it = map1.iterator(); it.hasNext(); ) {
					Long2IntMap.Entry e = it.next();
					if (map2.get(e.getKey()) > 0) {
						wordsMatched += 1;
						eventsMatched += e.getValue();
					}
				}
			}
			// Bump 0 to 1 to avoid a negative bucket index: with 0 the index
			// expression is -1/size, which is -1 (out of bounds) when size==1.
			if (wordsMatched == 0) wordsMatched = 1;
			// long arithmetic guards against int overflow of matched*nrBuckets.
			wordOverLap[(int) (((long) wordsMatched * nrBuckets - 1) / map1.size())] += 1;
			if (eventsMatched == 0) eventsMatched = 1;
			eventOverLap[(int) ((eventsMatched * nrBuckets - 1) / map1.sumValues())] += 1;
		}
		
		for(int i = 0; i<nrBuckets; ++i) {
			System.out.printf("%3d -%3d%%:\t%g%%\t%g%%\n", 
					i*100/nrBuckets, (i+1)*100/nrBuckets,
					100.0*wordOverLap[i]/countsMap1.size(),
					100.0*eventOverLap[i]/countsMap1.size());
		}
	}

	/**
	 * Builds a job that accumulates leaf-cluster counts for one file.
	 * An IOException is reported and the job ends; the tool then proceeds
	 * with whatever was counted (best-effort, matching prior behavior).
	 */
	private static Runnable countingJob(final LanguageModel lm, final String fname,
			final Map<Integer,Long2IntMap> countsMap) {
		return new Runnable() {
			@Override
			public void run() {
				try {
					computeCounts(lm, fname, countsMap);
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		};
	}

	/**
	 * Reads the training data in {@code fname} and accumulates, per
	 * history-tree leaf cluster, a map from (tuple & overtFactorsMask) to
	 * its total count.
	 *
	 * @param lm        language model whose history tree routes contexts to leaves
	 * @param fname     training-data file to read
	 * @param countsMap output: leaf cluster id -> (masked tuple -> count)
	 * @throws IOException if the file cannot be opened or read
	 */
	private static void computeCounts(LanguageModel lm, String fname, Map<Integer,Long2IntMap> countsMap) throws IOException {
		FileChannel channel = new FileInputStream(fname).getChannel();
		try {
			TrainingDataReader reader = new OnDiskTrainingDataReader(channel);
			ReadableTrainingData data = new ReadableTrainingData(reader);
			
			long overtFactorsMask = Experiment.getInstance().getTupleDescription().getOvertFactorsMask();
			while(data.hasNext()) {
				TrainingDataBlock block = data.next();
				
				for(ContextFuturesPair pair : block) {
					// Walk the history tree down to the leaf for this context.
					Context ctx = pair.getContext();
					BinaryTree<HistoryTreePayload> tree = lm.getHistoryTree();
					while(!tree.isLeaf()) {
						Question question = tree.getPayload().question;
						tree = question.test(ctx) ? tree.getRight() : tree.getLeft();
					}
					
					TupleCountPair futures[] = pair.getFutures();
					int clusterid = tree.getPayload().clusterid;
					Long2IntMap map = countsMap.get(clusterid);
					if (map == null) {
						map = new Long2IntMap(futures.length);
						countsMap.put(clusterid, map);
					}
					for(TupleCountPair tc : futures) {
						map.addAndGet(tc.tuple & overtFactorsMask, tc.count);
					}
				}
			}
		} finally {
			// Close even on a read error (also releases the underlying stream).
			channel.close();
		}
	}
}
