/*  
  -- The Lime II Project -- 

  A tuplespaces-based middleware for coordinating agents and mobile hosts.
  Copyright (C) 2005.
  Gian Pietro Picco, Amy L. Murphy, Lorenzo Bellini.

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 
*/

package lime2;
import lime2.ca.ICommunicationAdapter;

/**
 * The superclass of all remote protocols.
 * <br>
 * <br>
 * <i>Special thanks to Giovanni Turi and Andrea Sini for reporting a bug in this class.</i>
 * 
 * @author Gian Pietro Picco
 * @author Amy Murphy
 * @author Lorenzo Bellini
 */ 

public abstract class Protocol implements Runnable, Policies
{
 // member variables
 static CommunicationMgr cMgr    = CommunicationMgr.get(); // the CommunicationMgr has already been initialized
 static ICommunicationAdapter ca = cMgr.getCA();           // the communication adapter
 LimeTSMgr ltsm        = null;                             // the LimeTSMgr of the involved tuplespace
 IncomingBuffer buffer = null;                             // the input buffer of the current thread
 Result agent          = null;                             // the result on which an agent is waiting, if any
 RemoteOp ro           = null;                             // the op that started this protocol
 byte type             = -1;                               // the type of the protocol
 byte subtype          = -1;                               // the subtype of the protocol

 /**
  * Run method: performs reconciliation, if necessary.
  * <p>
  * Reconciliation is attempted only when (a) the tuplespace exists ({@code ltsm != null}),
  * (b) its policy is {@code CONSERVATIVE}, and (c) the triggering operation is not a
  * multicast request. At most one reconciliation per {@link Reconciliator} runs at a time:
  * the "reconciling" flag is acquired with an atomic test-and-set under the
  * {@code Reconciliator} monitor, and is released only by the thread that acquired it —
  * even if the reconciliation operation throws.
  */
 public void run()
 {
  // (a) tuplespace must exist (ltsm != null)
  // (b) tuplespace must be "conservative" (policy == CONSERVATIVE)
  // (c) it must not be a multicast request (.. please note that we don't have multicast replies)
  if (ltsm == null || ltsm.getPolicy() != CONSERVATIVE || (ro.subtype == LimeConstants.OPEN && ro.target == null)) return;

  // please note that Reconciliator instances are refreshed when the LimeSystemTupleSpace is refreshed
  Reconciliator recn = Reconciliator.get((ro.subtype == LimeConstants.PASSIVE_OPEN ? ro.source : ro.target), ro.name);

  // .. atomic get & set: "acquired" records whether THIS thread flipped the flag from
  // false to true, i.e. whether this thread now owns the reconciliation attempt
  boolean acquired = false;
  synchronized (recn)
  {
   if (!recn.isReconciling())
   {
    recn.setReconciling(true);
    acquired = true;
   }
  }

  // (a) if we are already reconciled, skip reconciliation
  // (b) if we did not acquire the flag, another thread is already reconciling: skip
  // (c) if we are within the previous reconciliation process, skip reconciliation (if failures
  //     occur, they will be fixed by the next reconciliation) -- see the note at the bottom
  if (!recn.isReconciled() && acquired && !(ltsm.isWithinReaction() && Reconciliator.isNestedReconciliation()))
  {
   // we don't want simultaneous reconciliation processes (per tuplespace); this also protects
   // against the standard op that arrives after reconciliation before (the passive side of)
   // reconciliation on this host has finished (= set "recn.reconciled" to true)
   synchronized (ltsm)
   {
    RemReconcileOp rco = new RemReconcileOp(LimeConstants.RECONCILE, LimeConstants.OPEN, ltsm.getName(),
                                            (ro.subtype == LimeConstants.PASSIVE_OPEN ? ro.source : ro.target), recn);
    try
    {
     cMgr.serialRun(rco);
    }
    finally
    {
     // BUGFIX: release the flag even when serialRun throws; previously an exception here left
     // "reconciling" stuck at true, permanently suppressing reconciliation with this peer.
     // The reset is performed under the recn monitor, consistently with the test-and-set above.
     synchronized (recn)
     {
      recn.setReconciling(false);
     }
    }
   }
  }
  else if (acquired)
  {
   // we acquired the flag but decided to skip the reconciliation: release it so that a later
   // operation (or the NoLazyReconciler) can retry.
   // BUGFIX: the old code reset the flag even when ANOTHER thread owned it (acquired == false),
   // which could clear the flag of a reconciliation still in progress; only the owner resets it.
   synchronized (recn)
   {
    recn.setReconciling(false);
   }
  }

  // what if during reconciliation of X with Y wrt tuplespace T1 the out of misplaced / ghost tuples or the
  // installation of reactions makes X start a reconciliation process with host Z wrt tuplespace T2 ?
  // There are four cases:
  //
  // 1) T1 = T2, Y = Z
  // since we are already reconciling, nothing is done (.. it would be recursive)
  //
  // 2) T1 = T2, Y != Z
  // we already own the mutex of the LimeTSMgr :-)
  //
  // 3) T1 != T2, Y = Z
  // we should compete for the mutex of the LimeTSMgr of the other tuplespace
  //
  // 4) T1 != T2, Y != Z
  // we should compete for the mutex of the LimeTSMgr of the other tuplespace
  //
  // It is easy to see that a reconciliation process can lead to a chain of reconciliation processes (the thread
  // collects - recursively - the mutex of a LimeTSMgr and then another.. and so on).
  // Consider the following scenario:
  // - (a) this host is reconciling with host H1 (wrt tuplespace T1)
  // - (b) this host is reconciling with host H2 (wrt tuplespace T2)
  // Since the two reconciliation processes regard different tuplespaces, they can be executed in parallel. Let's
  // suppose that (a) starts a reconciliation process with host Hx wrt T2 and (b) starts a reconciliation process
  // with host Hy wrt T1. This is a deadlock: both the threads try to acquire the mutex owned by the other one.
  //
  // Of course we don't want this scenario to happen. The key point is that we don't want reconciliation (that should
  // regard only a couple of hosts wrt a specific tuplespace) to degenerate into a distributed transaction concerning
  // (most of the LimeServers of) the system. This would violate the philosophy of Weak Lime.
  //
  // The solution is the following one: when a reconciliation process is triggered by a standard operation within
  // another reconciliation process (= out of misplaced / ghost tuples or installation of reactions), we don't run
  // the nested reconciliation.
  //
  // .. is this a problem ?
  //
  // No, because standard operations would cause the active side of reconciliation to start remotely - the standard
  // operation itself reaches the remote host; if the target host considers itself engaged it just replies to the
  // operation (.. asymmetrical view of the reconciliation status).
  // Otherwise it will reply starting the active side reconciliation request. This second reconciliation process will
  // succeed or fail according to interleaving / timeouts (do we have to wait for the mutex of the LimeTSMgr ?,
  // network latencies, workload / enqueuing, etc.. ) - but these flat (vs recursive) parallel reconciliation
  // processes cannot cause deadlocks.
  // If the other reconciliation fails it will be retried in the future by another operation or by NoLazyReconciler.
  // We have broken a distributed transaction into many parallel reconciliations that (can) influence each other:
  // this is really a "weak" semantics.
 }

}

