package dse.messages.singleversion;

import java.util.HashMap;
import java.util.Hashtable;
import java.util.LinkedList;
import java.util.Map;

import dse.*;
import dse.messages.*;
import dse.messages.updatingshares.*;

/**
 * Transaction that downloads enough shares of a data object (without any
 * caching) to reconstitute it, querying one candidate cluster at a time and
 * falling back to the next candidate cluster whenever the current one cannot
 * supply the outstanding shares. The class also implements the callbacks for
 * the share-update/activation protocol.
 *
 * NOTE(review): the message-*sending* halves of both protocols (the bodies of
 * {@link #start}, {@link #sendGetShareRequest} and the former sendShareUpdate
 * and AfterUpdateTransfer helpers) were commented out in the original source,
 * so in its present state this class only reacts to callbacks and never
 * enqueues messages itself. Recover the original bodies from version control
 * if this transaction type is re-enabled.
 *
 * Not thread-safe; assumed to be driven by a single-threaded simulation loop.
 */
public class GetDataObjectSharesNoCacheTransaction 
extends Transaction
implements ShareDownloader, ActivateUpdateShareRequester
{
	// --- request identity -------------------------------------------------
	/** Node on whose behalf the shares are being fetched/updated. */
	private final Node requester;
	/** The data object whose shares this transaction handles. */
	private final DataObject desiredObject;
	
	// --- download bookkeeping ---------------------------------------------
	/** Cluster currently being asked for shares (null until assigned). */
	private Cluster currentCluster;
	/** Index of the most recent share requested from currentCluster. */
	private int lastShareRequested;
	/** Remaining candidate clusters that may host shares of the object. */
	private LinkedList<Cluster> clusterDataMappings;
	
	/** Share index -> node the share was successfully downloaded from. */
	private final Map<Integer, Node> sharesDownloaded;
	/**
	 * Per-cluster record of updated shares: cluster -> (share index -> node).
	 * Stays null unless the (currently disabled) update path allocates it.
	 */
	private Map<Cluster, Map<Integer, Node>> sharesUpdated;
	/** Failed download/update attempts against the current cluster. */
	private int sharesThatCouldntBeDownloaded;
	/** Number of shares whose update succeeded (update protocol only). */
	private int sharesUpdatedSuccessful;
	
	// --- outcome tracking -------------------------------------------------
	/** True once the transaction has been declared a failure. */
	private boolean failed;
	/** Shares believed still obtainable from the current cluster. */
	private int validShareCount;
	/** Activation acknowledgements received (update protocol only). */
	private int activateCount;
	/** In-flight request messages whose outcome is still pending. */
	private int messagesAdded;
	
	
	/**
	 * @param experiment    owning experiment (network, modules, queues)
	 * @param id            transaction identifier
	 * @param requester     node initiating the request
	 * @param desiredObject data object whose shares are to be fetched
	 */
	public GetDataObjectSharesNoCacheTransaction (
			Experiment experiment,
			int id,
			Node requester,
			DataObject desiredObject
			)
	{
		super(id, requester, experiment);
		
		this.requester = requester;
		this.desiredObject = desiredObject;
		this.currentCluster = null;
		// HashMap rather than the original legacy Hashtable: this state is
		// private to one transaction, so synchronization buys nothing.
		this.sharesDownloaded = new HashMap<Integer, Node>();
		this.sharesUpdated = null; // allocated lazily by the update path
		this.sharesThatCouldntBeDownloaded = 0;
		this.failed = false;
		this.validShareCount = 0;
		this.activateCount = 0;
		this.clusterDataMappings = null;
		this.messagesAdded = 0;
		this.sharesUpdatedSuccessful = 0;
	}
	
	/**
	 * Initiates the first set of messages.
	 *
	 * NOTE(review): the entire implementation (both the update branch and the
	 * download branch that seeded clusterDataMappings/currentCluster and sent
	 * the initial share requests) was commented out in the original source,
	 * so this is intentionally a no-op.
	 */
	public void start (ExperimentRecorder recorder)
	{	
		// Intentionally empty -- message generation is disabled.
	}	
	
	/**
	 * Callback: a share of {@code dobj} arrived from {@code from}.
	 * Records the share, finishes the transaction once enough distinct
	 * shares have been collected, or falls over to the next candidate
	 * cluster when no requests remain outstanding.
	 */
	public void shareDownloaded (
			ExperimentRecorder recorder,
			Node from,
			DataObject dobj, 
			int shareIndex,
			double timeLeft
			)
	{
		recorder.debug(
				this, 
				"shareDownloaded{" + dobj.id() + "," + shareIndex+ "}! by " +
				requester + " in trans#" + super.id +  
				" (shareCount now = " + (sharesDownloaded.size()+1)
				+ ")",
				timeLeft
				);
		
		// This should really never fail...
		assert (dobj == desiredObject);
		
		if (failed)
			return;
		
		// BUG FIX: the original called Hashtable.contains(shareIndex), which
		// searches *values* (Nodes), not keys, so duplicate downloads of the
		// same share were never detected. containsKey is what was intended.
		if (sharesDownloaded.containsKey(shareIndex)) {
			recorder.warn(
					"GetDataObjectSharesNoCacheTransaction #" + super.id + ": " +
					" Share #" + shareIndex + " was downloaded multiple times."
					);
			return;
		}
		
		if (finished) {
			recorder.warn(
					"GetDataObjectSharesNoCacheTransaction #" + super.id + ": " +
					" Share #" + shareIndex + " was downloaded AFTER the " +
					"entire data object was already successfully " +
					"reconstituted from other shares."
					);
			return;
		}
		
		sharesDownloaded.put(shareIndex, from);
		messagesAdded--; // one outstanding request resolved
		
		// Have we downloaded enough shares to reconstitute the object?
		if (sharesDownloaded.size() >= dobj.requiredShareCount()){
			// We are done, we have enough downloads!
			finished = true;
			failed  = false;
			recorder.debug(
					"GetDataObjectSharesNoCacheTransaction trans#" + super.id + 
					" is finshed!!!"
					);
			recorder.dataRequestFinished(
					super.id, 
					requester, 
					desiredObject, 
					true, 
					experiment.network().lookupCluster(
							from.clusterId()
							).superNode(),
					false,
					timeLeft
					);
		}
		else if(messagesAdded == 0)
			// All requests answered but still short -- move to the next cluster.
			tryNextCluster(recorder, timeLeft, this.desiredObject.requiredShareCount()-this.sharesDownloaded.size());
	}
	
	/**
	 * Callback: a share request failed. Either declares overall failure
	 * (requester down, or no cluster can still satisfy us) or re-issues
	 * requests for not-yet-downloaded share indices.
	 */
	public void shareDownloadFailed (
			ExperimentRecorder recorder, 
			DataObject dobj, 
			int shareIndex,
			double timeLeft
			)
	{
		recorder.debug(
				this, 
				"shareDownloadFailed{dobj " + dobj.id() + " shr" + 
				shareIndex + "} by " + requester + " in trans#" + super.id,
				timeLeft
				);
		
		// Have we already declared failure?
		if (failed)
			return;
		
		messagesAdded--;
		if (!requester.isUp())
			// Then we are pretty much dead
			transactionFailed(recorder, timeLeft);
		else {
			sharesThatCouldntBeDownloaded++;
			
			// Can we still possibly succeed with this cluster?
			if (sharesThatCouldntBeDownloaded > 
				validShareCount - (desiredObject.requiredShareCount()-sharesDownloaded.size()))
			{
				if(this.clusterDataMappings.size() == 0)
				{
					transactionFailed(recorder, timeLeft);
					return;
				}
				// Request ALL remaining un-downloaded share indices.
				// NOTE(review): lastShareRequested is incremented before use,
				// matching the original's (possibly intentional) skip of the
				// current index -- confirm against the protocol spec.
				while (lastShareRequested <= desiredObject.shareCount()) {
					lastShareRequested++;
					if (!sharesDownloaded.containsKey(lastShareRequested) && sendGetShareRequest(recorder, currentCluster, timeLeft))
						messagesAdded++; // means message successfully enqueued.
				}
			}
			// At this point we should issue another request for the share or if
			// we should just give up.
			else
			{
				// Request just ONE more share to replace the failed attempt.
				while (lastShareRequested <= desiredObject.shareCount()) {
					lastShareRequested++;
					if (!sharesDownloaded.containsKey(lastShareRequested) && sendGetShareRequest(recorder, currentCluster, timeLeft))
					{
						messagesAdded++;
						break; // means message successfully enqueued.
					}
				}
			}
			if((messagesAdded == 0) && (this.sharesDownloaded.size()<this.desiredObject.requiredShareCount()))	
				tryNextCluster(recorder, timeLeft, this.desiredObject.requiredShareCount()-this.sharesDownloaded.size());
		}	
	}

	/** @return true once the download phase has terminated (success or failure). */
	public boolean isDoneDownloadingShares () {
		return (failed || finished);
	}
	

	/**
	 * Sends a request for share #{@code lastShareRequested} to the node
	 * hosting it in {@code nearestCluster}.
	 *
	 * NOTE(review): the implementation (resolve host via the assigner, build
	 * a request-share message, enqueue it) was commented out in the original
	 * source; this stub always reports "enqueued" without sending anything.
	 *
	 * @return true if the message was (nominally) enqueued
	 */
	private boolean sendGetShareRequest (
			ExperimentRecorder recorder,
			Cluster nearestCluster,
			double timeLeft
			)
	{
		// Intentionally a stub -- message generation is disabled.
		return true;
	}
	
	
	/**
	 * Marks this transaction as finished-and-failed, rolls back any
	 * unactivated updated shares (update path), and reports the outcome.
	 */
	private void transactionFailed (
			ExperimentRecorder recorder, 
			double timeLeft
			)
	{
		recorder.debug(this, "transFailed", timeLeft);
		
		finished = true;
		failed = true;
		if(isUpdate)
		{
			// Undo partially-applied updates so hosts don't keep stale,
			// never-activated share versions around.
			for(Integer si : this.sharesDownloaded.keySet())
			{
				sharesDownloaded.get(si).removeUnactivatedUpdatedDataObjectShare(desiredObject, si.intValue());
			}
			recorder.dataUpdateFinished(
				super.id, 
				requester, 
				desiredObject, 
				false,
				timeLeft
				);
		}
		else
			recorder.dataRequestFinished(
				super.id, 
				requester, 
				desiredObject, 
				false, 
				null,
				false,
				timeLeft
				);
	}
	
	
	/**ADD***/
	/**
	 * Callback: share #{@code shareIndex} of {@code dobj} was updated on
	 * node {@code to}. Records the update per-cluster and, once every
	 * expected share has been accounted for, hands off to the (disabled)
	 * activation phase.
	 *
	 * NOTE(review): dereferences {@code sharesUpdated}, which is only
	 * allocated by the commented-out update branch of start(); if this
	 * callback ever fires in the current state it will NPE -- confirm.
	 */
	public void shareUpdated (
			ExperimentRecorder recorder,
			Node to,
			DataObject dobj, 
			int shareIndex,
			double timeLeft
			)
	{
		recorder.debug(
				this, 
				"shareUpdated{" + dobj.id() + "," + shareIndex+ "}! by " +
				requester + " in trans#" + super.id +  
				" (shareCount now = " + (sharesDownloaded.size()+1)
				+ ")",
				timeLeft
				);
		
		// This should really never fail...
		assert (dobj == desiredObject);
		// Stage the new share version on the host if not already staged.
		if(!to.isHostingUnactive(dobj, shareIndex))
			to.update(dobj, shareIndex, false);
		if (failed)
			return;
		Cluster cluster = experiment.network().lookupCluster(to.clusterId());
		// Guard against duplicate update acknowledgements.
		if (sharesUpdated.containsKey(cluster) && sharesUpdated.get(cluster).containsKey(shareIndex)) {
			recorder.warn(
					"GetDataObjectSharesNoCacheTransaction #" + super.id + ": " +
					" Share #" + shareIndex + " was updated multiple times."
					);
			return;
		}
		
		if (finished) {
			recorder.warn(
					"GetDataObjectSharesNoCacheTransaction #" + super.id + ": " +
					" Share #" + shareIndex + " was updated AFTER the " +
					"entire data object was already successfully " +
					"updated"
					);
			return;
		}
		
		if(sharesUpdated.get(cluster) == null)
			sharesUpdated.put(cluster, new HashMap<Integer, Node>());
		sharesUpdated.get(cluster).put(shareIndex, to);
		sharesUpdatedSuccessful++;
		
		// Have all outstanding updates been resolved (success or failure)?
		if ((sharesUpdatedSuccessful+this.sharesThatCouldntBeDownloaded)>=validShareCount) 
		{
			// We are done, we have enough updating!
			afterUpdateTransfer(recorder, timeLeft);
		}
	}
	
	/**
	 * Callback: updating share #{@code shareIndex} failed. Declares failure
	 * if the requester is down; otherwise counts the failure and, when all
	 * outstanding updates have been resolved, hands off to the (disabled)
	 * activation phase.
	 */
	public void shareUpdateFailed (
			ExperimentRecorder recorder, 
			DataObject dobj, 
			int shareIndex,
			double timeLeft
			)
	{
		recorder.debug(
				this, 
				"shareUpdateFailed{dobj " + dobj.id() + " shr" + 
				shareIndex + "} by " + requester + " in trans#" + super.id,
				timeLeft
				);
		
		// Have we already declared failure?
		if (failed)
			return;
		
		if (!requester.isUp())
			// Then we are pretty much dead
			transactionFailed(recorder, timeLeft);
		else {
			sharesThatCouldntBeDownloaded++;
			
			// NOTE(review): the original early-failure check (too many failed
			// shares to ever reach requiredShareCount) was commented out; the
			// success/failure decision is deferred to afterUpdateTransfer.
			if((sharesUpdatedSuccessful+this.sharesThatCouldntBeDownloaded)>=validShareCount)
			{
				afterUpdateTransfer(recorder, timeLeft);
			}
		}
	}
	
	/** @return true once the update phase has terminated (success or failure). */
	public boolean isDoneUpdatingShares ()
	{
		return (failed || finished);
	}
	
	
	/**
	 * Moves on to the next candidate cluster and requests up to
	 * {@code numberNeeded} shares that have not yet been downloaded.
	 * Fails the transaction when no candidate clusters remain; recurses
	 * (terminating, since the candidate list shrinks each call) when the
	 * new cluster yields no enqueueable requests.
	 */
	private void tryNextCluster(ExperimentRecorder recorder, double timeLeft, int numberNeeded)
	{
		if(clusterDataMappings.size() == 0)
		{
			this.transactionFailed(recorder, timeLeft);
			return;
		}
		this.currentCluster = clusterDataMappings.removeFirst();
		// Reset per-cluster counters: shares we still consider obtainable,
		// failures observed, and the request cursor.
		this.validShareCount = desiredObject.shareCount()-this.sharesDownloaded.size();
		this.sharesThatCouldntBeDownloaded = 0;
		this.lastShareRequested = 1;
		messagesAdded = 0;
		for(; (messagesAdded<numberNeeded) && 
			(lastShareRequested<=desiredObject.shareCount()); lastShareRequested++)
		{
			if(!sharesDownloaded.containsKey(lastShareRequested))
			{
				if(sendGetShareRequest (recorder, currentCluster, timeLeft))
				{
					messagesAdded++;
				}
			}
		}
		this.lastShareRequested--; // undo the loop's final post-increment
		if(messagesAdded == 0)
		{
			tryNextCluster(recorder, timeLeft, numberNeeded);
		}
	}
	
	/**
	 * Post-update phase: decides whether enough shares were updated in some
	 * cluster and, if so, broadcasts activate-update messages to every
	 * updated host (otherwise fails the transaction).
	 *
	 * NOTE(review): the implementation was commented out in the original
	 * source; currently a no-op. (Renamed from the original non-conventional
	 * "AfterUpdateTransfer"; all callers are in this class.)
	 */
	private void afterUpdateTransfer(ExperimentRecorder recorder, double timeLeft)
	{
		// Intentionally empty -- activation-message generation is disabled.
	}
	
	/**
	 * Callback: an updated share was activated on {@code node}. Once every
	 * successfully updated share has been activated, the update transaction
	 * is reported as finished successfully.
	 */
	public void shareActivated (
			ExperimentRecorder recorder,
			DataObject dobj,
			int shareIndex,
			Node node,
			double timeLeft
			)
	{
		activateCount++;
		node.activateUpdatedDataObjectShare(dobj, shareIndex);
		if(activateCount == this.sharesUpdatedSuccessful)
		{
			finished = true;
			failed  = false;
			recorder.debug(
					"GetDataDhtObjectSharesTransaction trans#" + super.id + 
					" is finshed!!!"
					);
			recorder.dataUpdateFinished(
					super.id, 
					requester, 
					desiredObject, 
					true, 
					timeLeft
					);
		}		
	}
	
	/**
	 * Callback: activation failed on {@code node}. Rolls back that node's
	 * staged share and fails the whole transaction (activation is
	 * all-or-nothing).
	 */
	public void shareCouldNotBeActivated (
			ExperimentRecorder recorder,
			DataObject dobj,
			int shareIndex,
			Node node,
			double timeLeft
			)
	{
		node.removeUnactivatedUpdatedDataObjectShare(dobj, shareIndex);
		this.transactionFailed(recorder, timeLeft);
	}
	/**End ADD***/
}