package Market;

import java.util.*;

import edu.iastate.jrelm.core.SimpleAction;
import edu.iastate.jrelm.rl.rotherev.REPolicy;
import edu.iastate.jrelm.core.JReLMAgent;
import edu.iastate.jrelm.rl.ReinforcementLearner;
import edu.iastate.jrelm.rl.SimpleStatelessLearner;
import edu.iastate.jrelm.rl.rotherev.variant.VREParameters;


import sim.engine.*;


/**
 * A generation company (GenCo) agent for the wholesale power market simulation.
 *
 * <p>Each simulated day the market calls {@link #step(SimState)}, which asks the
 * Roth-Erev learner for an action and publishes it as the reported supply offer
 * (aR, bR, capRL, capRU). After the market clears, {@link #learn()} computes the
 * day's profit/net earnings from the hourly commitments and LMPs, feeds the chosen
 * reward back into the learner, and updates three convergence trackers.
 *
 * <p>Offer construction follows the DynTestAMES working paper
 * (DynTestAMES.JSLT.pdf): actions are reduced-form triplets
 * (lowerRI, upperRI, upperRCap) that {@link #checkOverPriceCap(double[])}
 * transforms into concrete offer coefficients.
 */
public class GenCo implements Steppable {

    private static final long serialVersionUID = 6628525522939315274L;

    // ---- Daily performance (from the Optimal Power Flow solution) ----------

    /** Daily net earnings = sum of hourly net earnings over the market day. */
    private double dailyNetEarnings;
    /** Daily profit = sum of hourly profits over the market day. */
    private double dailyProfit;
    /** Daily revenue = sum of hourly revenues over the market day. */
    private double dailyRevenue;
    /**
     * Fixed cost of production. NOTE(review): charged once PER HOUR in
     * {@link #updateDailyPerformance()} (i.e. 24x per day) -- confirm whether
     * the input figure is hourly or daily.
     */
    private double fixedCost;
    /** Cumulative money holding: money(new) = money(previous) + dailyProfit(new). */
    public double cumulativeWealth = 0d;

    // ---- Learning parameters -----------------------------------------------

    /** Probability of the most recently chosen action, after the update. */
    private double choiceProbability;
    /** Propensity of the most recently chosen action, after the update. */
    private double choicePropensity;
    /** ID of the most recently chosen action in the learner's action domain. */
    private int choiceID;
    /** Initial propensity for the VRE learner. */
    double propensity;
    /** Random seed handed to the VRE learner. */
    int randomSeed;
    /**
     * Cooling parameter: degree to which propensity values drive the choice
     * probabilities. @see DynTestAMES.JSLT.pdf
     */
    double cooling;
    /**
     * Recency parameter: damper on the growth of propensities over time.
     * @see DynTestAMES.JSLT.pdf (22)
     */
    double recency;
    /**
     * Experimentation parameter: lets reinforcement spill over from a chosen
     * supply offer to other offers, encouraging early exploration.
     * @see DynTestAMES.JSLT.pdf (23)
     */
    double experimentation;
    /** Reward selector: 0 -> daily profit, 1 -> daily net earnings. */
    double rewardSelection;
    /** Slope-start parameter. @see DynTestAMES.JSLT.pdf (pg 33) */
    private double slopeStart;

    // ---- Supply offer --------------------------------------------------------

    /** Reported cost coefficient ($/MWh). @see DynTestAMES.JSLT.pdf */
    private double aR;
    /** Reported cost coefficient ($/MW^2h). @see DynTestAMES.JSLT.pdf */
    private double bR;
    /** Reported lower production limit (MWs). @see DynTestAMES.JSLT.pdf */
    private double capRL;
    /** Reported upper production limit (MWs). @see DynTestAMES.JSLT.pdf */
    private double capRU;
    /** True cost coefficient ($/MWh). @see DynTestAMES.JSLT.pdf */
    private double aT;
    /** True cost coefficient ($/MW^2h). @see DynTestAMES.JSLT.pdf */
    private double bT;
    /** True lower production limit (MWs). @see DynTestAMES.JSLT.pdf */
    private double capTL;
    /** True upper production limit (MWs). @see DynTestAMES.JSLT.pdf */
    private double capTU;
    /** l = aT + 2*bT*capTL. Currently unused. @see DynTestAMES.JSLT.pdf (28) */
    private double l;
    /** u = aT + 2*bT*capRU. Currently unused. @see DynTestAMES.JSLT.pdf (29) */
    private double u;
    /** lR = l/(1-rIL). Currently unused. @see DynTestAMES.JSLT.pdf (30) */
    private double lR;
    /** uR = uS/(1-rIU). Currently unused. @see DynTestAMES.JSLT.pdf (32) */
    private double uR;
    /**
     * uS = u if u > lR, else lR + slopeStart. Currently unused.
     * @see DynTestAMES.JSLT.pdf (31)
     */
    private double uS;
    /** rCapL = capTL/capTU. @see DynTestAMES.JSLT.pdf (35) */
    double rCapL;
    /** rCapU in [rIMinC, 1]; range-index parameter for action-domain construction. */
    double rCapU;
    /** rIL <= rIMaxL < 1. @see DynTestAMES.JSLT.pdf */
    double rIL;
    /** rIU <= rIMaxU < 1. @see DynTestAMES.JSLT.pdf */
    double rIU;
    /** Action-domain cardinality along the lowerRI axis. */
    double m1;
    /** Action-domain cardinality along the upperRI axis. */
    double m2;
    /** Action-domain cardinality along the upperRCap axis. */
    double m3;
    /** M = m1 * m2 * m3 (total number of grid actions before cap filtering). */
    double M;
    /** Range-index parameter for action-domain construction. @see DynTestAMES.JSLT.pdf */
    double rIMaxL;
    /** Range-index parameter for action-domain construction. @see DynTestAMES.JSLT.pdf */
    double rIMaxU;
    /** Range-index parameter for action-domain construction. @see DynTestAMES.JSLT.pdf */
    double rIMinC;
    /**
     * Reduced-form admissible percentage supply offers. Currently unused.
     * @see DynTestAMES.JSLT.pdf Eq(36)
     */
    private double[] alpha;
    /** Current day. */
    public int day;

    /** Back-reference to the market this GenCo trades in. */
    private Market market;

    public String id;
    public int genCoID;
    /** Transmission-grid bus this GenCo is located at. */
    public int node;
    public double fcost;

    /** True cost data: keys "aT", "bT", "capTL", "capTU". */
    public Hashtable<String, Double> supplyOffer = new Hashtable<String, Double>();
    /** Learning configuration: propensity, cooling, recency, m1..m3, etc. */
    public Hashtable<String, Double> learningData = new Hashtable<String, Double>();
    /** Published offer: keys "aR", "bR", "capRU", "capRL". Read by the market. */
    public Hashtable<String, Double> reportedSupplyOffer = new Hashtable<String, Double>();
    /** Hourly commitments written by the market: keys "commit0".."commit23". */
    public Hashtable<String, Double> commitment = new Hashtable<String, Double>();
    public Hashtable<String, Double> lmpNode = new Hashtable<String, Double>();

    private VREParameters learningParams;
    private SimpleStatelessLearner learner;

    /** Action probabilities from the previous learning step. */
    private double[] oldActionProbability;
    /** Action probabilities from the current learning step. */
    private double[] newActionProbability;

    /** Offers whose start price reaches this cap are excluded from the domain. */
    private double priceCap = 1000.0;
    /** Scratch triplet (alphas as in Eq(36) of the DynTestAMES working paper). */
    private double[] triplet;
    /** Action-domain component count: (lowerRI, upperRI, upperRCap). */
    public static final int ADC = 3;
    /** Action domain after price-cap filtering; shared with the learner. */
    ArrayList adListNew = new ArrayList();

    // ---- Convergence-tracking state ------------------------------------------

    /** Per-action probability change below which probabilities count as stable. */
    private double dActionProbability = 0.001;
    private boolean bActionProbabilityConverge;
    private int iCheckDayLengthCount;
    /** Consecutive stable days required for action-probability convergence. */
    public int iCheckDayLength;
    /** Last reported (aR, bR, capRU), for the learning-result convergence check. */
    private double[] oldLearningResult;
    public double dLearningCheckDifference;
    private int iLearningCheckDayLengthCount;
    private int iLearningCheckDayLength;
    private boolean bLearningCheckConverge;
    private int iDailyNetEarningDayLengthCount;
    /** Sliding window of recent daily net earnings. */
    private double[] oldDailyNetEarningResult;
    private int iDailyNetEarningDayLength;
    private double dDailyNetEarningThreshold;
    private boolean bDailyNetEarningConverge;
    private int iDayCount;
    private boolean bActionProbabilityCheck;
    private int iStartDay;
    private int iLearningCheckDayCount;

    /**
     * Builds a GenCo, constructs its action domain, and initializes the learner.
     *
     * @param id           human-readable agent name
     * @param gencoid      numeric agent id
     * @param rn           random seed for the VRE learner
     * @param node         grid bus this GenCo is attached to
     * @param fcost        fixed cost of production (see note on {@link #fixedCost})
     * @param money        initial money holding
     * @param mt           the market this GenCo participates in
     * @param supplyoffer  true cost data ("aT", "bT", "capTL", "capTU")
     * @param learningdata learning parameters (propensity, cooling, recency, ...)
     */
    public GenCo(String id, int gencoid, int rn, int node, double fcost, double money,
                 Market mt, Hashtable supplyoffer, Hashtable learningdata) {

        this.id = id;
        this.genCoID = gencoid;
        this.node = node;
        this.supplyOffer = supplyoffer;
        this.learningData = learningdata;
        this.cumulativeWealth = money;
        this.fcost = fcost;
        // BUG FIX: fixedCost was never assigned, so it silently stayed 0 and
        // updateDailyPerformance() omitted the fixed cost from hourly total cost.
        this.fixedCost = fcost;
        this.market = mt;

        // True (privately known) cost and capacity data.
        this.aT = supplyOffer.get("aT");
        this.bT = supplyOffer.get("bT");
        this.capTL = supplyOffer.get("capTL");
        this.capTU = supplyOffer.get("capTU");

        // VRE learner configuration.
        this.propensity = learningData.get("propensity");
        this.cooling = learningData.get("cooling");
        this.recency = learningData.get("recency");
        this.experimentation = learningData.get("experimentation");
        this.randomSeed = rn;

        this.learningParams =
                new VREParameters(cooling, experimentation, propensity, recency, randomSeed);

        // Action-domain construction parameters.
        this.m1 = learningData.get("m1");
        this.m2 = learningData.get("m2");
        this.m3 = learningData.get("m3");
        this.rIMaxL = learningData.get("rIMaxL");
        this.rIMaxU = learningData.get("rIMaxU");
        this.rIMinC = learningData.get("rIMinC");
        this.slopeStart = learningData.get("slopeStart");
        this.rewardSelection = learningData.get("rewardSelection");

        // Build the grid of reduced-form actions, then drop/transform the ones
        // that would price above the cap.
        ArrayList adList = actionDomainConstruction();
        adListNew = checkActionDomain(adList);
        this.learner = new SimpleStatelessLearner(learningParams, adListNew);

        this.dailyProfit = 0;
        this.dailyNetEarnings = 0;
        this.dailyRevenue = 0;

        // Convergence-tracking state.
        oldActionProbability = new double[adListNew.size()];
        newActionProbability = new double[adListNew.size()];

        bActionProbabilityConverge = false;
        iCheckDayLengthCount = 0;
        iCheckDayLength = 5;
        oldLearningResult = new double[3];
        dLearningCheckDifference = 0.001;
        iLearningCheckDayLengthCount = 0;
        iLearningCheckDayLength = 5;
        bLearningCheckConverge = false;
        iDailyNetEarningDayLengthCount = 0;
        iDailyNetEarningDayLength = 5;
        oldDailyNetEarningResult = new double[iDailyNetEarningDayLength];

        dDailyNetEarningThreshold = 10.0;
        bDailyNetEarningConverge = false;
        iDayCount = 1;
        bActionProbabilityCheck = false;
        iStartDay = 1;
        iLearningCheckDayCount = 1;
    }

    /**
     * Clones every action in the list, transforms each clone from reduced form
     * into concrete offer coefficients via {@link #checkOverPriceCap(double[])},
     * and keeps only those that do not price above the cap.
     *
     * @param actionList list of reduced-form {@code double[3]} triplets
     * @return new list of transformed {@code double[3]} offers (aR, bR, capRU)
     */
    public ArrayList checkActionDomain(ArrayList actionList) {
        ArrayList newActionList = new ArrayList();
        for (int i = 0; i < actionList.size(); i++) {
            double[] newAction = ((double[]) actionList.get(i)).clone();
            // checkOverPriceCap rewrites newAction IN PLACE into (aR, bR, capRU).
            if (!checkOverPriceCap(newAction)) {
                newActionList.add(newAction);
            }
        }
        return newActionList;
    }

    /**
     * Recomputes the daily performance measures (profit, net earnings, revenue)
     * from the hourly commitments and LMPs published by the market.
     * Called by {@link #learn()} before the learner is updated.
     */
    private void updateDailyPerformance() {
        // BUG FIX: reset the daily accumulators each day. Previously they were
        // zeroed only in the constructor, so the "daily" figures grew
        // cumulatively across days -- which made updateWealth() add a running
        // total instead of one day's profit and prevented the net-earnings
        // convergence check (threshold 10.0) from ever succeeding.
        dailyProfit = 0;
        dailyNetEarnings = 0;
        dailyRevenue = 0;

        for (int hour = 0; hour < market.Hours; hour++) {
            double commitmentHour = commitment.get("commit" + hour);
            Hashtable lmpatnode = (Hashtable) market.lmpAtHour.get("lmpathour" + hour);
            // "lmp"+node makes sure this GenCo reads the price at its own bus.
            double lmpHour = ((Double) lmpatnode.get("lmp" + node)).doubleValue();

            double hourlyVariableCost = commitmentHour * (aT + bT * commitmentHour);
            double hourlyTotalCost = hourlyVariableCost + fixedCost;
            double hourlyRevenue = commitmentHour * lmpHour;
            double hourlyProfit = hourlyRevenue - hourlyTotalCost;
            double hourlyNetEarning = hourlyRevenue - hourlyVariableCost;

            dailyProfit += hourlyProfit;
            dailyNetEarnings += hourlyNetEarning;
            dailyRevenue += hourlyRevenue;
        }
    }

    /** MASON callback: choose and publish a supply offer for the coming day. */
    public void step(SimState state) {
        aggregateFunction(); // there is no learning at the very beginning
    }

    /**
     * Asks the learner for an action and publishes it as the reported supply
     * offer (aR, bR, capRU, capRL) for the market to read.
     *
     * <p>The actions stored in the learner's domain were already transformed by
     * {@link #checkOverPriceCap(double[])} (via {@link #checkActionDomain}) from
     * reduced-form triplets into concrete offer coefficients, so the chosen
     * array can be used directly here.
     */
    private void aggregateFunction() {
        // Renamed from "triplet" to avoid shadowing the field of the same name.
        double[] chosenAction = (double[]) learner.chooseActionRaw();

        aR = chosenAction[0];
        bR = chosenAction[1];
        capRU = chosenAction[2];
        capRL = capTL; // reported lower limit is always the true lower limit

        reportedSupplyOffer.put("aR", aR);
        reportedSupplyOffer.put("bR", bR);
        reportedSupplyOffer.put("capRU", capRU);
        reportedSupplyOffer.put("capRL", capRL);

        // TODO(review): leftover debug trace for one hard-coded unit (id 5);
        // consider removing or routing through a logger.
        if (genCoID == 5) {
            System.out.println(+aR + "       " + bR + "      " + capRU);
        }
    }

    /**
     * Checks whether a reduced-form action would price above the price cap and,
     * as a deliberate SIDE EFFECT, rewrites the action IN PLACE from
     * (lowerRI, upperRI, upperRCap) into reported offer coefficients
     * (aR, bR, capRU). Callers rely on this mutation.
     *
     * @param action reduced-form triplet; overwritten with (aR, bR, capRU)
     *               unless the offer is rejected
     * @return {@code true} if the offer would exceed the price cap (the action
     *         is then left untransformed and should be discarded)
     * @see <f/> DynTestAMES.JSLT.pdf (pg 34)
     */
    public boolean checkOverPriceCap(double[] action) {
        double lowerRI = action[0];
        double upperRI = action[1];
        double upperRCap = action[2];

        // Step 0: reported upper capacity limit.
        double capRU = upperRCap * (capTU - capTL) + capTL;

        // Step 1: reported price at the lower production limit.
        double lR = (aT + 2 * bT * capTL) / (1 - lowerRI);

        // Step 2: price at the upper limit before stretching (uStart).
        double u = aT + 2 * bT * capRU;
        double uStart;
        if (lR < u) {
            uStart = u;
        } else {
            uStart = lR + slopeStart;
        }

        // Offers that already start at or above the cap are infeasible.
        if (uStart >= priceCap) {
            return true;
        }

        // Step 3: reported price at the upper production limit.
        double uR = uStart / (1 - upperRI);

        // Step 4: reported slope coefficient bR.
        // NOTE(review): divides by (capRU - capTL); rIMinC > 0 should keep
        // capRU strictly above capTL provided capTU > capTL -- confirm the
        // input data guarantees this.
        action[1] = 0.5 * ((uR - lR) / (capRU - capTL));

        // Step 5: reported intercept aR.
        action[0] = lR - 2 * action[1] * capTL;

        // Clip the reported capacity so the offer never prices above the cap.
        double maxPrice = action[0] + 2 * action[1] * capRU;
        if (maxPrice > priceCap) {
            action[2] = (priceCap - action[0]) / (2 * action[1]);
        } else {
            action[2] = capRU;
        }

        return false;
    }

    /**
     * Constructs GenCo's action domain: every (lowerRI, upperRI, upperRCap)
     * triplet on an m1 x m2 x m3 grid over [0, rIMaxL] x [0, rIMaxU] x
     * [rIMinC, 1]. @see DynTestAMES.JSLT.pdf
     *
     * @return list of {@code double[ADC]} reduced-form actions (empty if the
     *         parameter ranges are invalid; an error is printed in that case)
     */
    private ArrayList actionDomainConstruction() {

        M = m1 * m2 * m3;
        double number = 10.0;

        ArrayList actionDomain = new ArrayList();

        if (m1 >= 1 && m2 >= 1 && m3 >= 1 && rIMaxL < 1 && rIMaxL >= 0
                && rIMaxU < 1 && rIMaxU >= 0 && rIMinC <= 1 && rIMinC > 0) {

            // When a dimension offers a single choice (mX == 1), its increment
            // is made larger than the whole range so the corresponding loop
            // runs exactly once.
            double inc1;
            double inc2;
            double inc3;
            if (m1 == 1) {
                inc1 = number; // special case: only one choice of lower RI
            } else {
                inc1 = rIMaxL / (m1 - 1); // incremental step for lower RI
            }
            if (m2 == 1) {
                inc2 = number; // special case: only one choice of upper RI
            } else {
                inc2 = rIMaxU / (m2 - 1); // incremental step for upper RI
            }
            if (m3 == 1) {
                inc3 = number; // special case: only one choice of upper Cap
            } else {
                inc3 = (1 - rIMinC) / (m3 - 1); // incremental step for upper Cap
            }

            for (double i = 0; i <= rIMaxL; i = i + inc1) {
                for (double j = 0; j <= rIMaxU; j = j + inc2) {
                    for (double k = 1 - rIMinC; k >= 0; k = k - inc3) {
                        triplet = new double[ADC]; // fresh array per action (added by reference)
                        triplet[0] = i;
                        triplet[1] = j;
                        triplet[2] = k + rIMinC;
                        actionDomain.add(triplet);
                    }
                }
            }
        } else {
            System.out.println("INCORRECT PARAMETER RANGES: M1, M2, M3, RIMaxL, RIMaxU, RIMinC.");
        }
        return actionDomain;
    }

    /** Adds one day's profit to the cumulative money holding. */
    private void updateWealth(double dailyProfit) {
        this.cumulativeWealth = this.cumulativeWealth + dailyProfit;
    }

    /**
     * GenCo learning: refreshes the day's performance figures, updates the
     * learner's propensities with the selected reward (profit or net earnings,
     * per {@link #rewardSelection}), and advances the convergence trackers.
     */
    public void learn() {

        this.updateDailyPerformance(); // update profit data before learning
        this.updateWealth(dailyProfit); // update money holding before learning

        SimpleAction lastAction = (SimpleAction) learner.getPolicy().getLastAction();
        choiceID = lastAction.getID();
        REPolicy policy = (REPolicy) learner.getPolicy();

        // new Double(...) is deprecated; Double.valueOf is the modern equivalent.
        if (rewardSelection == 0) { // reward is daily profit
            learner.update(Double.valueOf(this.dailyProfit));
        }
        if (rewardSelection == 1) { // reward is daily net earnings
            learner.update(Double.valueOf(this.dailyNetEarnings));
        }

        choiceProbability = policy.getProbability(choiceID);
        choicePropensity = policy.getPropensity(choiceID);

        updateActionProbabilities();
        updateLearningResult();
        updateDailyNetEarningResult();
    }

    /**
     * Tracks whether the learner's full action-probability vector has been
     * stable (every per-action change <= dActionProbability) for more than
     * iCheckDayLength consecutive days; sets bActionProbabilityConverge.
     */
    private void updateActionProbabilities() {
        REPolicy policy = (REPolicy) learner.getPolicy();
        double[] dProbability = policy.getProbabilities();

        boolean bConverged = true;
        for (int i = 0; i < adListNew.size(); i++) {
            oldActionProbability[i] = newActionProbability[i];
            newActionProbability[i] = dProbability[i];
            if ((bConverged)
                    && (Math.abs(newActionProbability[i] - oldActionProbability[i])
                            > dActionProbability)) {
                // Any single action moving too much resets the streak.
                bConverged = false;
                iCheckDayLengthCount = 0;
                bActionProbabilityConverge = false;
            }
        }

        if (bConverged) {
            iCheckDayLengthCount++;
            if (iCheckDayLengthCount > iCheckDayLength) {
                bActionProbabilityConverge = true;
            }
        }
    }

    /**
     * Tracks whether the reported offer (aR, bR, capRU) has been stable
     * (each component changing <= dLearningCheckDifference) for more than
     * iLearningCheckDayLength consecutive days; sets bLearningCheckConverge.
     */
    private void updateLearningResult() {

        boolean bConverged = true;

        if (Math.abs(oldLearningResult[0] - aR) > dLearningCheckDifference) {
            bConverged = false;
        }
        if (Math.abs(oldLearningResult[1] - bR) > dLearningCheckDifference) {
            bConverged = false;
        }
        if (Math.abs(oldLearningResult[2] - capRU) > dLearningCheckDifference) {
            bConverged = false;
        }

        oldLearningResult[0] = aR;
        oldLearningResult[1] = bR;
        oldLearningResult[2] = capRU;

        if (bConverged) {
            iLearningCheckDayLengthCount++;
            if (iLearningCheckDayLengthCount > iLearningCheckDayLength) {
                bLearningCheckConverge = true;
            }
        } else {
            iLearningCheckDayLengthCount = 0;
            bLearningCheckConverge = false;
        }
    }

    /**
     * Tracks whether daily net earnings have stayed within
     * dDailyNetEarningThreshold of every value in a sliding window of the last
     * iDailyNetEarningDayLength days; sets bDailyNetEarningConverge.
     */
    private void updateDailyNetEarningResult() {

        boolean bConverged = true;

        // Compare today's figure against every value currently in the window.
        for (int i = 0; i < iDailyNetEarningDayLengthCount; i++) {
            if (Math.abs(oldDailyNetEarningResult[i] - dailyNetEarnings)
                    > dDailyNetEarningThreshold) {
                bConverged = false;
                break;
            }
        }

        if (bConverged) {
            iDailyNetEarningDayLengthCount++;
            if (iDailyNetEarningDayLengthCount >= iDailyNetEarningDayLength) {
                bDailyNetEarningConverge = true;
                iDailyNetEarningDayLengthCount = iDailyNetEarningDayLength;

                // Window full: shift left and append today's value.
                for (int j = 0; j < iDailyNetEarningDayLengthCount - 1; j++) {
                    oldDailyNetEarningResult[j] = oldDailyNetEarningResult[j + 1];
                }
                oldDailyNetEarningResult[iDailyNetEarningDayLengthCount - 1] = dailyNetEarnings;
            } else {
                oldDailyNetEarningResult[iDailyNetEarningDayLengthCount - 1] = dailyNetEarnings;
            }
        } else {
            // Divergence: restart the window from today's value.
            oldDailyNetEarningResult[0] = dailyNetEarnings;
            iDailyNetEarningDayLengthCount = 0;
            bDailyNetEarningConverge = false;
        }
    }

    /** Returns this GenCo's learner. */
    public SimpleStatelessLearner getLearner() {
        System.out.println(" get Learner from genco ");
        return learner;
    }

}

   













