// @HC-BEGIN
package org.apache.hadoop.mapred;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex;

import cache.CacheInputFormatId;
import cache.io.CacheInputFormat;

/**
 * A singleton cache manager that holds the view of the task trackers' cached data.
 * It is part of the job tracker, and is used when a task tracker attempts
 * to fetch a new map task.
 * 
 * @author adi
 *
 */
public enum CacheState {
	
	/* The singleton instance */
	INSTANCE;

	static final Log LOG = LogFactory.getLog(CacheState.class);
	
	/**
	 * Key is host, value is set of splits in the cache of this host.
	 * NOTE(review): plain HashMap, no synchronization — confirm the JobTracker
	 * only touches this from a single scheduling thread.
	 */
	private Map<String, Set<CacheInputFormatId>> nodeToSplits;
	
	/**
	 * Key is tip, value is the number of times this tip was NOT
	 * chosen to be executed.
	 */
	private Map<TaskInProgress, Integer> tipToSkipCounter; 

	private CacheState() {
		this.nodeToSplits = new HashMap<String, Set<CacheInputFormatId>>();
		this.tipToSkipCounter = new HashMap<TaskInProgress, Integer>();
	}

	/**
	 * Updates the splits in the cache of the node.
	 * 
	 * @param host the task tracker host name.
	 * @param splits the set of splits currently cached on that host.
	 */
	public void updateNodeSplits(String host, Set<CacheInputFormatId> splits) {
		this.nodeToSplits.put(host, splits);
	}
	
	/**
	 * Finds the best tip for the given job for this host, according to the following policy:
	 * 1. If host contains a cached split of a specific tip, then that tip is returned.
	 * 2. If no host contains a cached split of any tip, then return null (let Hadoop decide).
	 * 3. If a certain tip's split is not included in any host's cache, then return it;
	 *    else choose the most skipped tip.
	 * 
	 * @param job the job configuration.
	 * @param tips the candidate tips; a tip chosen in phase 1 or 3-easy is removed from this list.
	 * @param host the requesting task tracker host.
	 * @return the chosen tip, or null when Hadoop's default scheduling should decide.
	 */
	public TaskInProgress getBestTip(JobConf job, List<TaskInProgress> tips, String host) {
		
		String inputFormatId = job.get(CacheInputFormat.DELEGATE_INPUT_FORMAT_ID);
	
		// 1. Prefer a tip whose split is already cached on this very host.
		Iterator<TaskInProgress> tipIterator = tips.iterator();
		while (tipIterator.hasNext()) {
			TaskInProgress tip = tipIterator.next();
			CacheInputFormatId splitId = splitIdFor(job, tip, host, inputFormatId);
			if (splitId == null) {
				continue;
			}
			Set<CacheInputFormatId> splitsOnCache = this.nodeToSplits.get(host);
			if (splitsOnCache != null) {
				// Linear equals() scan, deliberately not Set.contains():
				// CacheInputFormatId's hashCode may not be consistent with
				// equals — TODO confirm before switching to contains().
				for (CacheInputFormatId splitOnCache : splitsOnCache) {
					if (splitId.equals(splitOnCache)) {
						LOG.info("hadoop-cache: cache found on " + host);
						tipIterator.remove();
						return tip;
					}
				}
			}
		}
		
		// 2. Determine whether ANY host caches a split of a candidate tip,
		//    and remember the first tip whose split is cached nowhere.
		boolean foundCachedTip = false;
		TaskInProgress noneCachedTip = null;
		for (TaskInProgress tip : tips) {
			CacheInputFormatId splitId = splitIdFor(job, tip, host, inputFormatId);
			if (splitId == null) {
				continue;
			}
			boolean currentTipIsCached = false;
			for (Set<CacheInputFormatId> splitsOnAnyHost : this.nodeToSplits.values()) {
				for (CacheInputFormatId cacheInputFormatId : splitsOnAnyHost) {
					if (splitId.equals(cacheInputFormatId)) {
						foundCachedTip = true;
						currentTipIsCached = true;
					}
				}
			}
			if ((noneCachedTip == null) && (!currentTipIsCached)) {
				noneCachedTip = tip;
			}
		}
		
		if (!foundCachedTip) {
			LOG.info("no cached tip found.");
			return null;
		}
		
		// 3 easy: a tip whose split is cached nowhere can run here without
		// wasting another host's cache locality.
		if (noneCachedTip != null) {
			LOG.info(" assigned tip: " + noneCachedTip);
			tips.remove(noneCachedTip);
			return noneCachedTip;
		}
		
		// 3 hard: every candidate is cached on some other host. Rebuild the
		// skip counters for the surviving tips and pick the most skipped one,
		// so no tip starves while waiting for its cache-local host.
		Map<TaskInProgress, Integer> tipToSkipCounterOld = tipToSkipCounter;
		tipToSkipCounter = new HashMap<TaskInProgress, Integer>();
		TaskInProgress mostSkippedTip = null;
		int maxSkippedTip = Integer.MIN_VALUE;
		for (TaskInProgress tip : tips) {
			Integer previous = tipToSkipCounterOld.get(tip);
			int num = (previous == null) ? 1 : previous + 1;
			tipToSkipCounter.put(tip, num);
			if (num > maxSkippedTip) {
				maxSkippedTip = num;
				mostSkippedTip = tip;
			}
		}
		
		// NOTE(review): unlike phases 1 and 3-easy, the returned tip is NOT
		// removed from tips here — confirm the caller expects that asymmetry.
		return mostSkippedTip;

	}
	
	/**
	 * Resolves the cache split id of the given tip for scheduling on host,
	 * or null when the tip is not currently schedulable there (not runnable,
	 * already running, previously failed on host) or its split cannot be read.
	 */
	private CacheInputFormatId splitIdFor(JobConf job, TaskInProgress tip,
			String host, String inputFormatId) {
		if (!tip.isRunnable() || tip.isRunning() || tip.hasFailedOnMachine(host)) {
			return null;
		}
		InputSplit split;
		try {
			TaskSplitIndex tipSplitIndex = tip.getSplitInfo().getSplitIndex();
			split = getSplitDetails(job, tipSplitIndex);
		} catch (IOException e) {
			// Best-effort: skip the unreadable split instead of failing scheduling,
			// but keep the cause in the log (the original dropped it).
			LOG.error("cannot read split", e);
			return null;
		}
		return new CacheInputFormatId(split, inputFormatId);
	}
	
	/**
	 * Retrieves the input split details of the given splitIndex.
	 * 
	 * @param jobConf - the job configuration.
	 * @param splitIndex - the task split index to retrieve.
	 * @return the InputSplit corresponding to the given split index.
	 * @throws IOException In case we cannot access the split's meta-file.
	 */
	@SuppressWarnings("unchecked")
	public static <T extends InputSplit> T getSplitDetails(JobConf jobConf, TaskSplitIndex splitIndex) throws IOException {
		Path file = new Path(splitIndex.getSplitLocation());
		long offset = splitIndex.getStartOffset();
		FileSystem fs = file.getFileSystem(jobConf);
		FSDataInputStream inFile = fs.open(file);
		// try/finally so the stream is not leaked when seek/read/deserialize
		// throws (the original only closed on the success path).
		try {
			inFile.seek(offset);
			String className = Text.readString(inFile);
			Class<T> cls;
			try {
				cls = (Class<T>) jobConf.getClassByName(className);
			} catch (ClassNotFoundException ce) {
				IOException wrap = new IOException("Split class " + className
						+ " not found");
				wrap.initCause(ce);
				throw wrap;
			}
			SerializationFactory factory = new SerializationFactory(jobConf);
			Deserializer<T> deserializer = (Deserializer<T>) factory.getDeserializer(cls);
			deserializer.open(inFile);
			return deserializer.deserialize(null);
		} finally {
			inFile.close();
		}
	}
}

//@HC-END