package dse.messages.singleversion;

import java.util.Hashtable;
//import java.util.Iterator;
import java.util.LinkedList;

import dse.Cluster;
import dse.DataObject;
import dse.Experiment;
import dse.ExperimentRecorder;
import dse.Node;
import dse.messages.Message;
import dse.messages.MessagePurpose;
import dse.messages.MessageType;
import dse.messages.Transaction;
import dse.messages.locatedataobj.LocateClustersWithDataObjectRequester;
import dse.messages.updatingshares.ActivateUpdateShareMessageHandler;
import dse.messages.updatingshares.ActivateUpdateShareRequester;


/**
 * This transaction handles the downloading of shares for a data object.
 *  
 * The sequence of messages is:
 * 	1.	Node determines which shares it is going to request.  It does not need
 * 		to get all shares, but could issue requests for all of them initially so
 * 		that it knows quickly if it can get the data object.  If it only tries
 * 		to get a few shares initially, then if some of those requests fail it
 * 		will have to send out more requests for different shares, and that ends
 * 		up taking more time.  This decision is managed by the 
 * 		ShareRequestDecisionMaker.
 *	2.	Node sends share requests to each node in its local cluster that it 
 *		deems responsible for a share (this is that node whose cluster id most
 *		closely precedes the hashed share id).  The hit count is recorded.
 *	3.1	Those nodes respond immediately with the share IF they have it.
 *	3.2	If the node has previously confirmed that the share is hosted in a 
 *		remote cluster, then it attempts to do this again.  So skip to step 5
 *		and if that fails then go to step 3.3.
 *	3.3	Those nodes that don't know about the share and aren't hosting it then 
 *		send a locate-clusters-with-data-object message to their super node.
 *	4.	The super node, in response to the locate-clusters-with-data-object 
 *		message responds with clusters based off hashing the data-object id and 
 *		looking in its cached bloom filters.
 *	5.	The node will then send a has-share message to the node assigned to 
 *		host the share in the closest cluster.
 *	6.	The nodes then respond affirmatively or negatively.  
 *	7.	Upon receipt of a negative message the node will contact the next node
 *		with a has-share message.  And it will do this until it exhausts all 
 *		possible clusters (returned by the locate-clusters-with-data-object 
 *		message).  On the first affirmative response it sends a redirect
 *		response back to the original requesting node telling it where it can 
 *		find the data object share.  Otherwise it responds with failure.
 *	8.	The requesting node, upon getting enough redirect messages to share
 *		nodes, will contact those share nodes with get-share messages.
 *	9.	Those nodes will then respond with the data, or, failure if they no 
 *		longer have the share.
 *  
 * @author Aaron
 */
public class GetDataObjectSharesTransaction 
extends Transaction
implements ShareDownloader, ActivateUpdateShareRequester, LocateClustersWithDataObjectRequester//, HasShareRequester
{
	private Node requester;
	private DataObject desiredObject;
	
	//private Cluster requesterCluster;
	private int lastShareRequested;
	private Cluster currentCluster;
	private LinkedList<Cluster> clusterDataMappings;	
	private Hashtable<Integer, Node> sharesDownloaded;
	//private boolean hasShare;
	//private boolean noShare;
	private int messageAdded;
	private int shareNeeded;
	//private int hasShareMessageAdded;
	
	private Hashtable<Cluster, Hashtable<Integer, Node>> sharesUpdated;
	private int sharesThatCouldntBeDownloaded;
	private int sharesUpdatedSucessful;
	
	private boolean failed;
	private int validShareCount;
	private int activateCount;
	
	/**
	 * Creates a share-download transaction for a single data object.
	 *
	 * @param experiment    the experiment this transaction runs within
	 * @param id            unique transaction id (forwarded to the superclass)
	 * @param requester     the node that wants the data object
	 * @param desiredObject the data object whose shares are to be fetched
	 */
	public GetDataObjectSharesTransaction (
			Experiment experiment,
			int id,
			Node requester,
			DataObject desiredObject
			)
	{
		super(id, requester, experiment);
		
		this.requester = requester;
		this.desiredObject = desiredObject;
		//this.requesterCluster = experiment.network().lookupCluster(requester.clusterId());
		this.currentCluster = null;
		this.clusterDataMappings = null;
		this.sharesDownloaded = new Hashtable<Integer, Node>();
		this.sharesUpdated = null;
		this.sharesThatCouldntBeDownloaded = 0;
		this.failed = false;
		this.validShareCount = 0;
		this.activateCount = 0;
		this.sharesUpdatedSucessful = 0;
		//this.hasShare = false;
		//this.noShare = false;
		this.messageAdded = 0;
		// Ask the decision maker up front how many shares should be requested
		// in the first wave (step 1 of the message sequence in the class doc).
		this.shareNeeded = experiment.modules().shareRequestDecisionMaker().sharesToRequestFor(desiredObject);
		//this.hasShareMessageAdded = 0;
	}
	
	
	/**
	 * This initiates the first set of messages.
	 *
	 * NOTE(review): the whole method body below is one large block comment —
	 * Java block comments do not nest, so the inner slash-star at the old
	 * "int srf" section is plain text and the comment opened on the first
	 * body line runs until the closing star-slash near the end. At runtime
	 * this method therefore does NOTHING. The commented-out logic documents
	 * the intended behavior (record initiation, then either push share
	 * updates or send a locate-clusters-with-data-object query); confirm
	 * whether this disabling is intentional.
	 */
	
	public void start (ExperimentRecorder recorder)
	{	
		/*if(isUpdate)
		{
			recorder.dataUpdateInitiated(
					super.id, requester, desiredObject, 
					timeLeft
					);
			
			recorder.debug(this, "start", timeLeft);
		
			Iterable<Cluster> clusters = experiment.modules().assigner().assign(desiredObject, 
					experiment.network().clusters(), experiment.network().maxClusterId());
			validShareCount = 0;
			this.sharesUpdated = new Hashtable<Cluster, Hashtable<Integer, Node>>();
			for(Cluster c : clusters)
			{
				Hashtable<Node, Integer> nodes = experiment.modules().assigner().hostedShares(c, desiredObject);
				for (Node n : nodes.keySet()) {
					if (sendShareUpdate(
							recorder, n, nodes.get(n), timeLeft
							))
						validShareCount++;
				}
				if (validShareCount < desiredObject.requiredShareCount())
					transactionFailed(recorder, timeLeft);
			}
		}
		else
		{
			recorder.dataRequestInitiated(
					super.id, requester, desiredObject, 
					timeLeft
					);
			
			recorder.debug(this, "start", timeLeft);
			
			sendLocateClustersWithDataObjectMessage(recorder, timeLeft);
			
			/*int srf = experiment.modules().shareRequestDecisionMaker().
				sharesToRequestFor(
					desiredObject
					);
			int messagesAdded = 0;
			
			lastShareRequested = 1;
			for (; lastShareRequested <= srf; lastShareRequested++) {
				if (sendGetShareLocalRequest(
						recorder,timeLeft
						))
					messagesAdded++;
			}		
			lastShareRequested--;
			if (messagesAdded == 0)
				transactionFailed(recorder, timeLeft);*/
		//}
	}


	/**
	 * Callback invoked when one share of the desired data object has been
	 * successfully downloaded from {@code from}.
	 *
	 * Duplicate and late (post-completion) arrivals are logged and ignored.
	 * Otherwise the share is recorded and, once {@code requiredShareCount()}
	 * distinct shares are in hand, the transaction finishes successfully.
	 * If this was the last outstanding request and more shares are still
	 * needed, the next candidate cluster is tried.
	 *
	 * @param recorder   experiment recorder used for logging/metrics
	 * @param from       the node the share was downloaded from
	 * @param dobj       the data object the share belongs to (must be the
	 *                   object this transaction was created for)
	 * @param shareIndex index of the downloaded share
	 * @param timeLeft   simulation time remaining in the current period
	 */
	public void shareDownloaded (
			ExperimentRecorder recorder,
			Node from,
			DataObject dobj, 
			int shareIndex,
			double timeLeft
			)
	{
		recorder.debug(
				this, 
				"shareDownloaded{" + dobj.id() + "," + shareIndex+ "}! by " +
				requester + " in trans#" + super.id +  
				" (shareCount now = " + (sharesDownloaded.size()+1)
				+ ")",
				timeLeft
				);
		
		// This should really never fail...
		assert (dobj == desiredObject);
		
		if (failed)
			return;
		
		// BUG FIX: Hashtable.contains(Object) searches the table's *values*
		// (Nodes here), so the original contains(shareIndex) could never
		// detect an already-downloaded share index. containsKey() is the
		// correct duplicate test.
		if (sharesDownloaded.containsKey(shareIndex)) {
			recorder.warn(
					"GetDataObjectSharesTransaction #" + super.id + ": " +
					" Share #" + shareIndex + " was downloaded multiple times."
					);
			return;
		}
		
		if (finished) {
			recorder.warn(
					"GetDataObjectSharesTransaction #" + super.id + ": " +
					" Share #" + shareIndex + " was downloaded AFTER the " +
					"entire data object was already successfully " +
					"reconstituted from other shares."
					);
			return;
		}
		
		sharesDownloaded.put(shareIndex, from);
		// One fewer get-share request outstanding.
		messageAdded--;
		
		// Have we downloaded enough shares?
		if (sharesDownloaded.size() >= dobj.requiredShareCount()) {
			// We are done, we have enough downloads!
			finished = true;
			failed  = false;
			recorder.debug(
					"GetDataObjectSharesTransaction trans#" + super.id + 
					" is finshed!!!"
					);
			recorder.dataRequestFinished(
					super.id, 
					requester, 
					desiredObject, 
					true, 
					experiment.network().lookupCluster(
							from.clusterId()
							).superNode(),
					experiment.network().lookupCluster(
							from.clusterId()
							).isCaching(dobj, Experiment.currentPeriod),
					timeLeft
					);
		}
		else if(messageAdded == 0)
			// Nothing left in flight but still short of shares: move on to
			// the next candidate cluster for the remainder.
			tryNextCluster(recorder, timeLeft, this.desiredObject.requiredShareCount()-this.sharesDownloaded.size());
	}
			
	/**
	 * Callback invoked when a get-share request for {@code shareIndex} of
	 * {@code dobj} could not be satisfied.
	 *
	 * If the requester itself is down the transaction fails immediately.
	 * Otherwise the failure is tallied and either a burst of replacement
	 * requests (when success within this cluster is in doubt) or a single
	 * replacement request is issued; if nothing could be sent at all and we
	 * are still short of shares, the next candidate cluster is tried.
	 *
	 * @param recorder   experiment recorder used for logging/metrics
	 * @param dobj       the data object whose share failed to download
	 * @param shareIndex index of the failed share
	 * @param timeLeft   simulation time remaining in the current period
	 */
	public void shareDownloadFailed (
			ExperimentRecorder recorder, 
			DataObject dobj, 
			int shareIndex,
			double timeLeft
			)
	{
		recorder.debug(
				this, 
				"shareDownloadFailed{dobj " + dobj.id() + " shr" + 
				shareIndex + "} by " + requester + " in trans#" + super.id,
				timeLeft
				);
		messageAdded--;
		// Have we already declared failure?
		if (failed || finished)
			return;
		
		if (requester.isUp() == false)
			// Then we are pretty much dead
			transactionFailed(recorder, timeLeft);
		else {
			sharesThatCouldntBeDownloaded++;
			
			// Can we still possibly succeed?
			/*if (sharesThatCouldntBeDownloaded >
				desiredObject.shareCount() - desiredObject.requiredShareCount())
			{
				transactionFailed(recorder, timeLeft);
				return;
			}
			
			// At this point we should issue another request for the share or if
			// we should just give up.
			while (lastShareRequested <= desiredObject.shareCount()) {
				lastShareRequested++;
				if (sendGetShareLocalRequest(recorder, timeLeft))
					break; // means message successfully enqueued.
			}*/
			// Too many failures to succeed with what remains in this cluster:
			// blast requests for every remaining share index at once.
			// NOTE(review): clusterDataMappings is assumed non-null here — it
			// is only set in clustersThatMightHaveDataObject; confirm no
			// download failure can arrive before that callback.
			if (sharesThatCouldntBeDownloaded > 
			validShareCount - (desiredObject.requiredShareCount()-sharesDownloaded.size()))
			{
				if(this.clusterDataMappings.size() == 0)
				{
					transactionFailed(recorder, timeLeft);
					return;
				}
				// NOTE(review): lastShareRequested is incremented *before* the
				// containsKey/send check, so the index equal to the loop bound
				// is the last one actually tried — confirm this off-by-one
				// pattern matches the share-index numbering (1-based).
				while (lastShareRequested <= desiredObject.shareCount()) {
					lastShareRequested++;
					if (!sharesDownloaded.containsKey(lastShareRequested) && sendGetShareRequest(currentCluster, recorder, timeLeft))
						messageAdded++; // means message successfully enqueued.
				}
			}
			// At this point we should issue another request for the share or if
			// we should just give up.
			else
			{
				// Still recoverable: issue exactly one replacement request.
				while (lastShareRequested <= desiredObject.shareCount()) {
					lastShareRequested++;
					if (!sharesDownloaded.containsKey(lastShareRequested) && sendGetShareRequest(currentCluster, recorder, timeLeft))
					{
						messageAdded++;
						break; // means message successfully enqueued.
					}
				}
			}
			// Nothing in flight and not enough shares: fall through to the
			// next candidate cluster.
			if((messageAdded == 0) && (this.sharesDownloaded.size()<this.desiredObject.requiredShareCount()))	
				tryNextCluster(recorder, timeLeft, this.desiredObject.requiredShareCount()-this.sharesDownloaded.size());
		}		
	}
	
	/**
	 * Callback invoked when an attempt to push an updated share to a host
	 * node did not succeed.
	 *
	 * A dead requester kills the transaction outright; otherwise the failure
	 * is tallied and, once every outstanding update has resolved one way or
	 * the other, the activation phase is entered (which itself decides
	 * whether enough shares landed for the update to count as a success).
	 *
	 * @param recorder   experiment recorder used for logging/metrics
	 * @param dobj       the data object whose share update failed
	 * @param shareIndex index of the failed share
	 * @param timeLeft   simulation time remaining in the current period
	 */
	public void shareUpdateFailed (
			ExperimentRecorder recorder, 
			DataObject dobj, 
			int shareIndex,
			double timeLeft
			)
	{
		recorder.debug(
				this, 
				"shareUpdateFailed{dobj " + dobj.id() + " shr" + 
				shareIndex + "} by " + requester + " in trans#" + super.id,
				timeLeft
				);
		
		// Nothing left to do once failure has already been declared.
		if (failed)
			return;
		
		if (!requester.isUp()) {
			// The originating node has gone down: the whole transaction dies.
			transactionFailed(recorder, timeLeft);
			return;
		}
		
		sharesThatCouldntBeDownloaded++;
		
		// All outstanding updates accounted for (successes + failures)?
		// Then move on to activating whatever did land.
		if (sharesUpdatedSucessful + sharesThatCouldntBeDownloaded >= validShareCount)
			AfterUpdateTransfer(recorder, timeLeft);
	}

	/** @return true once this transaction has terminated, by success or failure. */
	public boolean isDoneDownloadingShares () {
		return failed || finished;
	}
	
	/*private boolean sendGetShareLocalRequest (
			ExperimentRecorder recorder,
			double timeLeft
			)
	{
		// This is who we want to contact...
		Node designatedShareHost = experiment.modules().assigner().assign(
				desiredObject, lastShareRequested, requesterCluster
				);
		if (designatedShareHost == null) {
			// We should turn this into a warning ...
			recorder.warn(
					"Share assigner failed to map share to node in cluster!"
					);
			return false;
		}
		
		Message m = super.createRequestShareLocalMessage(
				requester, 
				designatedShareHost, 
				desiredObject, 
				lastShareRequested,
				this
				);
		recorder.debug(this, "new msg: " + m, timeLeft);
		if (timeLeft > ExperimentRecorder.PeriodStart)
			experiment.messageQueue().enqueueMessageInPeriod(m, timeLeft);
		else
			experiment.messageQueue().enqueueMessage(m);
		return true;
	}*/
	
	/*private boolean sendShareUpdate(ExperimentRecorder recorder, Node node, int shareIndex,
			double timeLeft
			)
	{
		// This is who we want to contact...
		if (node == null) {
			// We should turn this into a warning ...
			recorder.warn(
					"Share assigner failed to map share to node in cluster!"
					);
			return false;
		}
		
		Message m = super.createMessage(
				MessageType.ShareDataTransfer, requester, node,
				desiredObject, MessagePurpose.DataObjectUpdate,
				new ShareDataTransferMessageHandler (
						experiment,
						desiredObject,
						shareIndex,
						this
						)
				);
		recorder.debug(this, "new msg: " + m, timeLeft);
		//if (timeLeft > ExperimentRecorder.PeriodStart)
			//experiment.messageQueue().enqueueMessageInPeriod(m, timeLeft);
		//else
			//experiment.messageQueue().enqueueMessage(m);
		return true;
	}*/
	
	
	/**
	 * Declares this transaction dead and reports the failure to the recorder.
	 *
	 * For update transactions, any share already staged (but not yet
	 * activated) on a host node is rolled back first, since the update as a
	 * whole never completed.
	 *
	 * @param recorder experiment recorder used for logging/metrics
	 * @param timeLeft simulation time remaining in the current period
	 */
	private void transactionFailed (
			ExperimentRecorder recorder, 
			double timeLeft
			)
	{
		recorder.debug(this, "transFailed", timeLeft);
		
		finished = true;
		failed = true;
		
		if (!isUpdate) {
			recorder.dataRequestFinished(
					super.id, 
					requester, 
					desiredObject, 
					false, 
					null,
					false,
					timeLeft
					);
			return;
		}
		
		// Roll back every staged-but-unactivated share on its host node.
		for (Integer stagedIndex : this.sharesDownloaded.keySet()) {
			Node host = sharesDownloaded.get(stagedIndex);
			host.removeUnactivatedUpdatedDataObjectShare(desiredObject, stagedIndex.intValue());
		}
		recorder.dataUpdateFinished(
				super.id, 
				requester, 
				desiredObject, 
				false,
				timeLeft
				);
	}
	
	
	
	/**
	 * Callback invoked when a host node {@code to} has accepted an updated
	 * share (staged, not yet activated).
	 *
	 * The share is recorded per-cluster in {@code sharesUpdated}; duplicates
	 * and post-completion arrivals are logged and ignored. Once every
	 * outstanding update has resolved (successes plus failures reach
	 * {@code validShareCount}), the activation phase begins.
	 *
	 * @param recorder   experiment recorder used for logging/metrics
	 * @param to         the node that accepted the updated share
	 * @param dobj       the data object the share belongs to (must be the
	 *                   object this transaction was created for)
	 * @param shareIndex index of the updated share
	 * @param timeLeft   simulation time remaining in the current period
	 */
	public void shareUpdated (
			ExperimentRecorder recorder,
			Node to,
			DataObject dobj, 
			int shareIndex,
			double timeLeft
			)
	{
		recorder.debug(
				this, 
				"shareUpdated{" + dobj.id() + "," + shareIndex+ "}! by " +
				requester + " in trans#" + super.id +  
				" (shareCount now = " + (sharesDownloaded.size()+1)
				+ ")",
				timeLeft
				);
		
		// This should really never fail...
		assert (dobj == desiredObject);
		// Stage the update on the host if it is not already holding an
		// unactivated copy of this share.
		if(!to.isHostingUnactive(dobj, shareIndex))
			to.update(dobj, shareIndex, false);
		if (failed)
			return;
		Cluster cluster = experiment.network().lookupCluster(to.clusterId());
		// Duplicate update for this (cluster, shareIndex) pair?
		if (sharesUpdated.containsKey(cluster) && sharesUpdated.get(cluster).containsKey(shareIndex)) {
			recorder.warn(
					"GetDataObjectSharesNoCacheTransaction #" + super.id + ": " +
					" Share #" + shareIndex + " was updated multiple times."
					);
			return;
		}
		
		if (finished) {
			recorder.warn(
					"GetDataObjectSharesNoCacheTransaction #" + super.id + ": " +
					" Share #" + shareIndex + " was updated AFTER the " +
					"entire data object was already successfully " +
					"updated"
					);
			return;
		}
		
		// Record the update under its host's cluster, creating the per-cluster
		// table on first use.
		if(sharesUpdated.get(cluster) == null)
			sharesUpdated.put(cluster, new Hashtable<Integer, Node>());
		sharesUpdated.get(cluster).put(shareIndex, to);
		sharesUpdatedSucessful++;
		
		// All outstanding updates resolved (successes + failures)?
		// Then proceed to the activation phase.
		if ((sharesUpdatedSucessful+this.sharesThatCouldntBeDownloaded)>=validShareCount) 
		{
			// We are done, we have enough updating!
			/*finished = true;
			failed  = false;
			recorder.debug(
					"GetDataDhtObjectSharesTransaction trans#" + super.id + 
					" is finshed!!!"
					);
			recorder.dataUpdateFinished(
					super.id, 
					requester, 
					desiredObject, 
					true, 
					to,
					timeLeft
					);*/ 
			AfterUpdateTransfer(recorder, timeLeft);
		}
	}
	
	public boolean isDoneUpdatingShares (){return (failed || finished);}
	
	/**
	 * Runs once every share-update attempt has resolved. The update only
	 * counts as a success if at least one single cluster ended up holding
	 * enough shares to reconstitute the object on its own; in that case an
	 * activate-update-share message is built for every updated share,
	 * otherwise the transaction fails.
	 *
	 * NOTE(review): the activate messages are created and logged but the
	 * enqueue call is commented out in the original — confirm whether these
	 * messages are ever actually delivered.
	 *
	 * @param recorder experiment recorder used for logging/metrics
	 * @param timeLeft simulation time remaining in the current period
	 */
	private void AfterUpdateTransfer(ExperimentRecorder recorder, double timeLeft)
	{
		// Did any single cluster receive a reconstitutable set of shares?
		boolean anyClusterComplete = false;
		for (Hashtable<Integer, Node> clusterShares : sharesUpdated.values()) {
			if (clusterShares.size() >= desiredObject.requiredShareCount()) {
				anyClusterComplete = true;
				break;
			}
		}
		
		if (!anyClusterComplete) {
			transactionFailed(recorder, timeLeft);
			return;
		}
		
		// Ask every node that accepted an updated share to activate it.
		for (Hashtable<Integer, Node> clusterShares : sharesUpdated.values()) {
			for (Integer shareIndex : clusterShares.keySet()) {
				Message m = super.createMessage(MessageType.ActivateUpdateShare,
						requester, clusterShares.get(shareIndex), MessagePurpose.DataObjectUpdate,
						new ActivateUpdateShareMessageHandler(experiment, desiredObject, 
								shareIndex.intValue(), this)
						);
				recorder.debug(
						this, "new msg: " + m, timeLeft
						);
			}
		}
	}
	
	/**
	 * Callback: host {@code node} confirmed activation of its updated share.
	 * Once every successfully updated share has been activated, the update
	 * transaction completes successfully.
	 *
	 * @param recorder   experiment recorder used for logging/metrics
	 * @param dobj       the data object whose share was activated
	 * @param shareIndex index of the activated share
	 * @param node       the node that activated the share
	 * @param timeLeft   simulation time remaining in the current period
	 */
	public void shareActivated (
			ExperimentRecorder recorder,
			DataObject dobj,
			int shareIndex,
			Node node,
			double timeLeft
			)
	{
		activateCount++;
		node.activateUpdatedDataObjectShare(dobj, shareIndex);
		
		// Still waiting on other activations?
		if (activateCount != this.sharesUpdatedSucessful)
			return;
		
		// Every updated share is now active: declare success.
		finished = true;
		failed = false;
		recorder.debug(
				"GetDataDhtObjectSharesTransaction trans#" + super.id + 
				" is finshed!!!"
				);
		recorder.dataUpdateFinished(
				super.id, 
				requester, 
				desiredObject, 
				true, 
				timeLeft
				);
	}
	
	/**
	 * Callback: host {@code node} could not activate its updated share.
	 * The staged share is discarded on that node and the whole transaction
	 * is declared a failure.
	 *
	 * @param recorder   experiment recorder used for logging/metrics
	 * @param dobj       the data object whose share failed to activate
	 * @param shareIndex index of the share that failed to activate
	 * @param node       the node that reported the failure
	 * @param timeLeft   simulation time remaining in the current period
	 */
	public void shareCouldNotBeActivated (
			ExperimentRecorder recorder,
			DataObject dobj,
			int shareIndex,
			Node node,
			double timeLeft
			)
	{
		node.removeUnactivatedUpdatedDataObjectShare(dobj, shareIndex);
		transactionFailed(recorder, timeLeft);
	}
	
	/*private void sendLocateClustersWithDataObjectMessage( 
			ExperimentRecorder recorder, double timeLeft
			)
	{
		Node superNode = experiment.network().lookupCluster(
				requester.clusterId()).superNode();
		
		// We need to send the super node a locate clusters with data object 
		// message
		Message m = super.createLocateClustersWithDataObjectMessage(
				requester, superNode, this.desiredObject, MessagePurpose.DataObjectRequest, this
				);
		recorder.debug(this, "new msg: " + m, timeLeft);
		//experiment.messageQueue().enqueueMessageInPeriod(m, timeLeft);
	}*/	
	
	/**
	 * Callback carrying the super node's answer to the
	 * locate-clusters-with-data-object query: the candidate clusters that
	 * might hold shares of the desired object.
	 *
	 * A dead requester or a null answer kills the transaction; otherwise the
	 * candidates are stored and the first one is tried for the shares we
	 * still need.
	 *
	 * @param recorder experiment recorder used for logging/metrics
	 * @param clusters candidate clusters, or null if none were found
	 * @param timeLeft simulation time remaining in the current period
	 */
	public void clustersThatMightHaveDataObject (
			ExperimentRecorder recorder, 
			LinkedList<Cluster> clusters, 
			double timeLeft
			)
	{
		// Either the requester going down or an empty answer is fatal.
		if (requester.isDown() || clusters == null) {
			transactionFailed(recorder, timeLeft);
			return;
		}
		
		// Remember the candidates, then start working through them by
		// contacting the nodes expected to host the shares we still need.
		clusterDataMappings = clusters;
		tryNextCluster(recorder, timeLeft, shareNeeded);
	}
	
	/**
	 * Pops the next candidate cluster off {@code clusterDataMappings} and
	 * resets the per-cluster request state before issuing get-share requests
	 * to it. Fails the transaction when no candidate clusters remain.
	 *
	 * Mutually recursive with {@link #tryDhtCluster}: if no request can be
	 * sent to the popped cluster, tryDhtCluster calls back here for the next
	 * one.
	 *
	 * @param recorder     experiment recorder used for logging/metrics
	 * @param timeLeft     simulation time remaining in the current period
	 * @param numberNeeded how many shares are still required
	 */
	private void tryNextCluster(ExperimentRecorder recorder, double timeLeft, int numberNeeded)
	{
		if(clusterDataMappings.size() == 0)
		{
			this.transactionFailed(recorder, timeLeft);
			return;
		}
		
		this.currentCluster = clusterDataMappings.removeFirst();
		// Shares still theoretically obtainable from this cluster.
		this.validShareCount = desiredObject.shareCount()-this.sharesDownloaded.size();
		this.sharesThatCouldntBeDownloaded = 0;
		// Share indices are walked starting from 1.
		this.lastShareRequested = 1;
		this.shareNeeded = numberNeeded;
		this.messageAdded = 0;
		//this.hasShare = false;
		//this.noShare = false;
		
		// The cache-cluster path is disabled; every candidate is treated as
		// a DHT cluster.
		//if(this.currentCluster.second)
			tryDhtCluster(recorder, timeLeft, numberNeeded);
		//else
			//tryCacheCluster(recorder, timeLeft, numberNeeded);
	}
	
	/**
	 * Issues get-share requests against the current cluster until either
	 * {@code numberNeeded} requests are in flight or every share index has
	 * been tried. If not a single request could be created, falls through to
	 * the next candidate cluster via {@link #tryNextCluster}.
	 *
	 * @param recorder     experiment recorder used for logging/metrics
	 * @param timeLeft     simulation time remaining in the current period
	 * @param numberNeeded how many shares are still required
	 */
	private void tryDhtCluster(ExperimentRecorder recorder, double timeLeft, int numberNeeded)
	{
		while (messageAdded < numberNeeded && lastShareRequested <= desiredObject.shareCount()) {
			boolean alreadyHave = sharesDownloaded.containsKey(lastShareRequested);
			if (!alreadyHave && sendGetShareRequest(currentCluster, recorder, timeLeft))
				messageAdded++;
			lastShareRequested++;
		}
		// Step back to the last index actually examined.
		this.lastShareRequested--;
		
		if (messageAdded == 0)
			tryNextCluster(recorder, timeLeft, numberNeeded);
	}
	
	/*private void tryCacheCluster(ExperimentRecorder recorder, double timeLeft, int numberNeeded)
	{
		for(; lastShareRequested<=desiredObject.shareCount(); lastShareRequested++)
		{
			if(!sharesDownloaded.containsKey(lastShareRequested))
			{
				if(sendHasShareRequest (currentCluster.first, lastShareRequested, recorder, timeLeft))
				{
					break;
				}
			}
		}
	}*/
	
	/**
	 * Builds a request-share message aimed at the node the assigner maps
	 * share {@code lastShareRequested} to within {@code nearestCluster}.
	 *
	 * NOTE(review): the enqueue of the created message is commented out in
	 * the original — the message is built and logged but apparently never
	 * sent; confirm this is intentional.
	 *
	 * @param nearestCluster the cluster to direct the request at
	 * @param recorder       experiment recorder used for logging/metrics
	 * @param timeLeft       simulation time remaining in the current period
	 * @return true if a target node existed and a message was created;
	 *         false if the assigner could not map the share to a node
	 */
	private boolean sendGetShareRequest (
			Cluster nearestCluster,
			ExperimentRecorder recorder,
			double timeLeft
			)
	{
		// Which node in that cluster should be hosting the share we want?
		Node target = experiment.modules().assigner().assign(
				desiredObject, lastShareRequested, nearestCluster
				);
		if (target == null) {
			recorder.warn(
					"Share assigner failed to map share to node in cluster!"
					);
			return false;
		}
		
		Message m = super.createRequestShareMessage(
				requester, 
				target, 
				desiredObject, 
				lastShareRequested,
				this
				);
		recorder.debug(this, "new msg: " + m, timeLeft);
		return true;
	}
	
	/*private boolean sendHasShareRequest(
			Cluster nearestCluster,
			int shareIndex,
			ExperimentRecorder recorder,
			double timeLeft
			)
	{
		// This is who we want to contact...
		Node designatedShareHost = experiment.modules().assigner().assign(
				desiredObject, shareIndex, nearestCluster
				);
		if (designatedShareHost == null) {
			// We should turn this into a warning ...
			recorder.warn(
					"Share assigner failed to map share to node in cluster!"
					);
			return false;
		}
		
		Message m = super.createHasShareRequestMessage(
				requester, 
				designatedShareHost, 
				desiredObject, 
				shareIndex,
				MessagePurpose.DataObjectRequest,
				this
				);
		recorder.debug(this, "new msg: " + m, timeLeft);
		if (timeLeft > ExperimentRecorder.PeriodStart)
			experiment.messageQueue().enqueueMessageInPeriod(m, timeLeft);
		else
			experiment.messageQueue().enqueueMessage(m);
		return true;
	}
	
	public void shareFound (ExperimentRecorder recorder, double timeLeft)
	{
		recorder.debug(
				this, "shareFound{" + desiredObject.id() +"}!", 
				timeLeft
				);
		// In this case we are pretty much done, and need to communicate this
		// info back to the origional requester
		//noShare = false;
		//if(!hasShare)
		//{
			//this.hasShare = true;
			tryDhtCluster(recorder, timeLeft, this.shareNeeded);
		//}
		//recorder.debug(this, "new msg: " + m, timeLeft);
		//experiment.messageQueue().enqueueMessageInPeriod(m, timeLeft);
		
		// Well we should also try to remember this for next time
		/*origReceiver.cacheLocation(
				dobj, shareIndex, foreignNodeSuspectedOfHavingShare
				);*/
	/*}
	
	public void shareNotFound (ExperimentRecorder recorder, double timeLeft)
	{
		if (requester.isDown()) {
			this.transactionFailed(recorder, timeLeft);
			return;
		}
		
		recorder.debug(
				this, "shareNOTFound{" + this.desiredObject.id() + "}!", 
				timeLeft
				);
		
		// OK, that didnt work out so we need to send out a locate clusters 
		// with shares message if we havent done that before, or if we have
		// and we are iterating through clutsers that had shares then we need
		// to go to the next cluster, or finally if we have nothing else to do
		// then we have to send a failure message back to the client.
		//this.hasShare = false;
		//if(!noShare)
		//{
			//noShare = true;
			tryNextCluster(recorder, timeLeft, this.shareNeeded);
		//}
	}
	
	public void shareNotFound (ExperimentRecorder recorder, double timeLeft, boolean up)
	{
		if (requester.isDown()) {
			this.transactionFailed(recorder, timeLeft);
			return;
		}
		
		recorder.debug(
				this, "shareNOTFound{" + this.desiredObject.id() + "}!", 
				timeLeft
				);
		
		// OK, that didnt work out so we need to send out a locate clusters 
		// with shares message if we havent done that before, or if we have
		// and we are iterating through clutsers that had shares then we need
		// to go to the next cluster, or finally if we have nothing else to do
		// then we have to send a failure message back to the client.
		
			// Try next cluster...
	
		this.lastShareRequested++;
		while(lastShareRequested<=this.desiredObject.shareCount())
		{
			if(this.sendHasShareRequest(currentCluster.first, lastShareRequested, recorder, timeLeft))
				break;
			this.lastShareRequested++;
		}
		
		if(lastShareRequested > this.desiredObject.shareCount())
			tryNextCluster(recorder, timeLeft, this.shareNeeded);
	}*/
}
