/*
  (C) 2005 Adam D. Bradley & Michael Ocean

  This program is free software; you can redistribute it and/or
  modify it under the terms of Version 2 of the GNU General Public 
  License.
  
  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.
  
  You should have received a copy of the GNU Library General Public
  License along with this library; if not, write to the Free
  Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

package step;

import java.util.ArrayList;
import snobject.snObject;

/**
* A TriggerReadNode is the parent of a trigger, used to extract values
* from that trigger.  The three ReadModes govern how that extraction
* is done (poll/sleep/basis-time-filter).
*
* A NONBLOCK read returns immediately, possibly with a value of NIL
*     resulting from the trigger precondition never being executed.
* A BLOCK is willing to wait (if needed) for the precondition to
*     be evaluated before getting a result
* A FRESH waits until the next precondition/result pair before getting
*     a result

*******************************
*	Overview:
*******************************
*   This node allows the child node to run out-of-step
*   with the parented expression, converting a stream of values into
*   discrete readings.  In effect, it allows its child to
*   be executed irregularly while providing the requested access to
*   its values.  /LEGALLY/ the child node must be a persistent (level
*   or edge) trigger, because other nodes do not provide a stream of
*   values. 
*
******************************
*	In detail:
*******************************
* - Functionally this node specifies the freshness of the access to
*   the values generated below it.  The values from below are taken from
*   a value stream, which are generated by a persistent trigger (level or 
*   edge).  A persistent trigger naturally loops (it's persistent) in its
*   own thread of control (i.e., never returns to the caller).  The trigger
*   <b>produces</b> its most recent value into shared memory and the read
*   nodes <b>consume</b> those values and provide discrete, iterative access 
*   to that value stream.  Multiple read nodes may be connected to the same 
*   trigger (for sharing of computation) and each may have different temporal 
*   needs for these values.
*   
*   
*	Temporal access needs to a value stream are broken into two larger classifications
*	
*   FRESH - these reads only care about the most recent value in the
*   	trigger stream/buffer.  Within the FRESH category there are three 
*   	temporal semantics.  (Say the execution of this read node occurs 
*   	at time t with a user-specified interval i)
*   
*		0) non-block - return the most recent value or NIL
*		1) block - as above but will block if nil
*		2) next - will block until next value is produced
* 
* <OMIT>
*		1) value - return a value made available at (or after) t-i
*					(or block if there is no such value).
*   	2) execution - block while trigger computes a new value 
*   				whose computation began at (or after) time t-i.
*
*    		NOTICE: for fresh execution, the streaming trigger was running
*    			beforehand, yet the values in the buffer are "not fresh enough".
*    			Indeed the programmer could achieve a similar functional 
*    			data semantic via a regular trigger (non-persistent and
*    			non-streaming) and ensure that the value is only computed 
*    			in lock-step, when requested from above -- thus saving 
*    			cycles.  (LTE must work for regular triggers too, otherwise 
*    			that statement is incorrect).  However, if sharing of the trigger 
*    			resource is desirable, then the use of a persistent/streaming
*    			trigger would allow other computations to attach to the buffer
*    
*   Triggers execute until they expire thanks to a persist expression from 
*   above (argument?  Flow type?).  When no parent wants its values any longer, 
*   the trigger could cease executing as its values are unused (however, it 
*   does not).
*    
* </OMIT>
*   	If you consider the stream of values, in a fresh read semantic some
*   	stream data is intentionally discarded unused, from the buffer.  
*   	Thus there is an analogy to UDP, best effort, or REAL-TIME needs here. 
*   		
*   BUFFERED - reads are "reliable" reads from the stream.  The goal is to 
*   buffer every value passed up (within reason) and process at some point, 
*   timely or no.
*   
*   	1)  LIFO - any buffer value available (LIFO) or NIL if empty.
*   	2)	LIFOB - any buffer value available (LIFO) or block if empty.
*   	3)  FIFO - any buffer value available (FIFO) or NIL if empty.
*   	4)  FIFOB - any buffer value available (FIFO) or block if empty.
*   
*   Point in fact, OFFLINE reads may lose values as the buffer window of a 
*   trigger is finite.  A flowtype on the node here may specify buffer
*   window size and how many buffer drops indicate a failure condition.
*   
*/

public class TriggerReadNodeBuffered extends AbstractRead
{
	/**
	 * Buffered values produced by the child trigger and not yet consumed.
	 * Evaluate() always removes index 0, so the insertion position chosen
	 * in PushFreshInput() is what implements the FIFO/LIFO discipline.
	 */
	ArrayList<snObject> valueBuffer = null;

	public TriggerReadNodeBuffered(String id, ReadMode _readmode){
		super(id,_readmode);
		valueBuffer = new ArrayList<snObject>();
    }

	public TriggerReadNodeBuffered(ReadMode _readmode) {
		super(_readmode);
		valueBuffer = new ArrayList<snObject>();
	}

    /**
     * Record a fresh value produced by child node 'who' into the buffer.
     *
     * Since Evaluate() always consumes from index 0, ordering is implemented
     * entirely at insertion time:
     *   FIFO/FIFOB - append at the tail, so the oldest value is at index 0;
     *   LIFO/LIFOB - insert at the head, so the newest value is at index 0.
     *
     * BUG FIX: previously the LIFO cases appended at the tail (which,
     * combined with remove(0) in Evaluate(), actually behaved as FIFO), and
     * the FIFO cases inserted at size()-1 -- an off-by-one of the intended
     * append at size() that permanently pinned the first buffered value at
     * the tail, so it was consumed last.
     *
     * @param value the freshly produced value
     * @param who   the producing node; must be children[0]
     */
    protected void PushFreshInput(snObject value, step.Node who)
    {
   		if (children[0] == who) {
   			switch(readmode) {
	   			case FIFO:
	   			case FIFOB:
	   				// append: oldest value remains at index 0 for consumption
	   				valueBuffer.add(value);
	   				return;
	   			case LIFO:
	   			case LIFOB:
	   				// push front: newest value is consumed first
	   				valueBuffer.add(0,value);
	   				return;
   			}
   		}
		// unknown producer, or a readmode this buffered node does not handle
		assert(false); 
    }
	
    /**
     * A buffered read enables once per evaluation round (rt_evaluated
     * gates re-firing).  The blocking variants (LIFOB/FIFOB) additionally
     * require at least one buffered value; the non-blocking variants
     * enable even on an empty buffer (see the note in Evaluate()).
     */
    public boolean IsEnabled()
    {
		switch(readmode) {
			case LIFOB:
			case FIFOB:			
			    return (!rt_evaluated && (valueBuffer.size()>0));
			case LIFO:
			case FIFO:			
			    return !rt_evaluated;
		}
		assert(false); return false;
    }

	/** @return an XML fragment reporting the current buffer depth. */
	public String GetInfoXML() {
		String s = "<bufferlen>" + valueBuffer.size() + "</bufferlen>";
		
		return GetInfoXML(s);
	}
    
    /**
     * Consume the value at the head of the buffer (index 0), publish it as
     * this node's value, and push the result upward.
     *
     * NOTE(review): for the non-blocking modes (LIFO/FIFO), IsEnabled() can
     * return true while the buffer is empty, and remove(0) then throws
     * IndexOutOfBoundsException instead of yielding the NIL described in the
     * class comment.  Left unchanged because the NIL representation is not
     * visible in this file -- TODO confirm and handle the empty case.
     *
     * @throws EvaluationFailure declared for interface compatibility
     */
    public void Evaluate() throws EvaluationFailure
    {		
		snObject v = null;
	    v = valueBuffer.remove(0);
	    assert(v!=null);
	    SetValue(v);
	    rt_evaluated = true;
	    PushResult();
	    return;    
    }

	// Reserved for timestamping read requests; not referenced in this file.
	protected java.util.Date requested;

	public String TagName() { return "bufferedread"; }

	/** Serialize this node, recording its readmode and current buffer depth. */
	public void Write(java.io.OutputStream out, int depth) throws java.io.IOException {
		WriteBasis(out, depth, "opcode=\"" + readmode + "\" bufferlen=\"" + valueBuffer.size() + "\" ");
	}

	/** @return a copy sharing no runtime state (fresh, empty buffer). */
	@Override
	public Node ShallowCopy() {
		TriggerReadNodeBuffered cn = new TriggerReadNodeBuffered(ID, readmode);
		Node.CopyNonRTPropertiesFromAtoB(this,cn);
		return cn;
	}
}
