package lip6.ivi;

import java.util.Random;


/**
 * Single-objective Markov Decision Process solved by value iteration.
 *
 * <p>States and actions are indexed by integers. The transition model
 * T[s][a][s'] and the reward model R[s][a] are either supplied explicitly
 * or generated at random. {@link #valueIteration()} iterates the Bellman
 * optimality backup until the max residual drops below {@code epsilon},
 * then extracts the greedy policy.
 *
 * <p>Relies on the project-local {@code Vector} and {@code Matrix} classes
 * (not {@code java.util}).
 */
public class MdpMonoObjectif {

	private int nState;              // number of states |S|
	Vector valueOldState;            // V_t: state values from the previous sweep
	Vector vNew;                     // V_{t+1}: state values of the current sweep
	private int nAction;             // number of actions |A|
	private double epsilon;          // stopping threshold on the Bellman residual
	private double gamma;            // discount factor
	double qvalue[][];               // Q(s,a), indexed [state][action]
	private int policy[];            // greedy policy: best action index per state

	private double reward[][];       // R(s,a), indexed [state][action]
	private double transition[][][]; // T(s,a,s'), indexed [state][action][nextState]
	double maxValue = Double.NEGATIVE_INFINITY; // last residual max_s |V_t(s) - V_{t+1}(s)|
	private int nElement;
	// FIXME(review): nElement is never assigned, so it is still 0 when this field
	// initializer runs — 'k' is a 0x0 matrix and is never read anywhere in this
	// class. Kept only because it is package-visible; candidate for removal.
	Matrix k = new Matrix(nElement, nElement);

	/**
	 * Builds an MDP from explicit transition and reward matrices.
	 *
	 * @param nEtat      number of states
	 * @param nAction    number of actions
	 * @param gamma      discount factor
	 * @param epsilon    value-iteration stopping threshold
	 * @param transition T[s][a][s'] transition probabilities (stored by reference)
	 * @param reward     R[s][a] immediate rewards (stored by reference)
	 */
	public MdpMonoObjectif(int nEtat, int nAction, double gamma, double epsilon, double [][][] transition, double [][] reward){
		this.nState = nEtat;
		this.nAction = nAction;
		this.gamma = gamma;
		this.epsilon = epsilon;
		this.transition = transition;
		this.reward = reward;
		valueOldState = new Vector(this.nState);
		vNew = new Vector(this.nState);
		qvalue = new double [nState][nAction];
		policy = new int [nState];
	}

	/**
	 * Demo entry point: builds a random 10-state / 100-action MDP, solves it
	 * with value iteration and prints the resulting policy.
	 */
	public static void main(String[] args) {
		int nEtat = 10;
		int [] policy = new int [nEtat];
		int nAction = 100;
		double gam = 0.95;
		double epsi = 0.01;
		MdpMonoObjectif mdp01 = new MdpMonoObjectif(nEtat, nAction, gam, epsi);
		System.out.println(mdp01.toString());
		// initValueState() is intentionally not called here: valueIteration()
		// performs that initialization itself, and calling it twice would
		// populate valueOldState twice via add().
		policy = mdp01.valueIteration();
		StringBuilder chaine = new StringBuilder();
		chaine.append("Policy  = (");
		for (int i = 0; i < nEtat; i ++) {
			chaine.append(policy[i]+" ");
		}
		chaine.append(") \n");
		System.out.println(chaine.toString());
	}

	/**
	 * Builds an MDP with uniformly random transition probabilities (each row
	 * T[i][j][.] normalized to sum to 1) and integer rewards drawn from [0, 9].
	 *
	 * @param nEtat   number of states
	 * @param nAction number of actions
	 * @param gamma   discount factor
	 * @param epsilon value-iteration stopping threshold
	 */
	public MdpMonoObjectif(int nEtat, int nAction, double gamma, double epsilon){
		this(nEtat, nAction, gamma, epsilon, new double [nEtat][nAction][nEtat], new double [nEtat][nAction]);
		Random r = new Random();
		for (int i = 0; i < nEtat; i ++) {
			for (int j = 0; j < nAction; j ++) {
				double sum = 0;
				for (int k = 0; k < nEtat; k ++) {
					this.transition[i][j][k] = r.nextDouble();
					sum += this.transition[i][j][k];
				}
				// Normalize so T(i,j,.) is a valid probability distribution.
				for (int k = 0; k < nEtat; k ++) {
					this.transition[i][j][k] /= sum;
				}
			}
		}
		for (int i = 0; i < nEtat; i ++) {
			for (int j = 0; j < nAction; j++) {
				this.reward[i][j] = r.nextInt(10);
			}
		}
	}

	/**
	 * Returns a one-line summary of the MDP parameters; when {@code verbose},
	 * also dumps the full transition and reward matrices.
	 *
	 * @param verbose whether to include the full matrices
	 * @return the formatted description
	 */
	public String toString(boolean verbose){
		StringBuilder chaine = new StringBuilder();
		chaine.append("MDP Mono-Objectif *******  STATES = " + this.nState + ",  ACTIONS = " + this.nAction + ", GAMMA = " + this.gamma + ", EPSILON = " + this.epsilon + "\n");
		if (verbose) {
			chaine.append( " ******************************** TRANSITION MATRIX ************************************ \n");
			for (int i = 0; i < this.nState; i ++) {
				chaine.append(" etat ="+i);
				for (int j = 0; j < this.nAction; j ++) {
					chaine.append(" action ="+j);
					for (int e = 0; e < nState; e ++) {
						chaine.append(", etat' ="+e+ "-->" +transition[i][j][e]);
					}
				}
				chaine.append("\n");
			}

			chaine.append( " ********************************* REWARDS MATRIX *********************************************\n");
			for (int i = 0; i < this.nState; i ++) {
				chaine.append(" etat = "+i);
				for (int j = 0; j < nAction; j ++){
					chaine.append(", Reward = " );
					chaine.append(" "+ reward[i][j]);
				}
			}
			chaine.append("\n");

		}
		return chaine.toString();
	}

	/**
	 * Resets all state values and the whole Q-table to zero.
	 *
	 * <p>NOTE(review): if {@code Vector.add} inserts elements (as
	 * {@code java.util.Vector} does), calling this method more than once
	 * grows {@code valueOldState}; {@link #valueIteration()} already calls it.
	 */
	public void initValueState(){
		for (int i = 0; i < nState; i ++) {
			valueOldState.add(i, 0.0);
		}
		for (int i = 0; i < nState; i ++) {
			for (int j = 0; j < nAction; j ++) {
				qvalue[i][j] = 0.0;
			}
		}
	}

	/**
	 * Runs value iteration until the Bellman residual
	 * max_s |V_t(s) - V_{t+1}(s)| falls below {@code epsilon}, then extracts
	 * the greedy policy from the final Q-values.
	 *
	 * @return for each state, the index of the action maximizing Q(s,a)
	 */
	public int [] valueIteration(){
		int t = 0;
		initValueState();
		double temp = 0.0;
		Vector vTempStates = new Vector(nState);
		vTempStates.init(nState);
		Vector qTempStates = new Vector(nAction);
		qTempStates.init(nAction);
		boolean arret = false;
		do {
			t = t + 1;
			for (int i = 0; i < nState; i ++) {
				// NOTE(review): add() runs on every sweep; if Vector.add inserts
				// (as java.util.Vector does), vNew grows by nState elements per
				// iteration. set() below still targets index i so results stay
				// correct, but a one-time vNew.init(nState) before the loop plus
				// set() here would avoid the growth — confirm against Vector.
				vNew.add(i, Double.NEGATIVE_INFINITY);
				for (int j = 0; j < nAction; j ++) {
					// Expected value of the successor state under action j.
					for (int e = 0; e < nState; e ++) {
						temp += transition[i][j][e] * valueOldState.get(e);
					}
					qvalue[i][j] = reward[i][j] + gamma * temp;
					temp = 0.0;
				}
				// Bellman optimality backup: V(i) = max_a Q(i,a).
				vNew.set(i, getMaxValue(qvalue[i]));
			}
			// Residual between consecutive value functions.
			for (int i = 0; i < nState; i ++) {
				vTempStates.set(i, Math.abs(valueOldState.get(i) - vNew.get(i)));
			}
			maxValue = vTempStates.getMaxValue();
			if (maxValue < epsilon) {
				arret = true;
			} else {
				for (int i = 0; i < nState; i ++) {
					valueOldState.set(i, vNew.get(i));
				}
			}
			if (t % 10 == 0) { System.out.println( "Step " + t + " \t Max Error " + maxValue); }
		} while (!arret);
		// Greedy policy extraction: policy[i] = argmax_a Q(i,a).
		for (int i = 0; i < nState; i ++) {
			for (int j = 0; j < nAction; j++) {
				qTempStates.set(j, qvalue[i][j]);
			}
			this.policy[i] = qTempStates.getItemMaxValue();
		}
		return policy;
	}

	/**
	 * Dumps every transition probability, one line per
	 * (state, action, nextState) triple.
	 */
	@Override
	public String toString(){
		StringBuilder chaine = new StringBuilder();
		for (int i = 0; i < this.nState; i ++){
			for (int j = 0; j < this.nAction; j ++) {
				for (int e = 0; e < nState; e ++) {
					chaine.append("etat ="+i+" ,  action ="+j+" ,etat' ="+e+ "-->" +transition[i][j][e] +"\n");
				}
			}
		}
		return chaine.toString();
	}

	/**
	 * Returns the largest value in the given array, or
	 * {@code Double.NEGATIVE_INFINITY} for an empty array.
	 *
	 * @param tableau array to scan (typically one row of the Q-table)
	 * @return the maximum element
	 */
	public double getMaxValue(double tableau []){
		double valeurMax = Double.NEGATIVE_INFINITY;
		for (int i = 0; i < tableau.length; i ++) {
			if (tableau[i] > valeurMax) {
				valeurMax = tableau[i];
			}
		}
		return valeurMax;
	}

	/** @return the number of states */
	public int getNState(){
		return nState;
	}

	/** @return the number of actions */
	public int getNActions(){
		return nAction;
	}

	/** @return the transition probability T(s, a, sp) */
	public double getTransition(int s, int a, int sp){
		return transition[s][a][sp];
	}

	/** @return the immediate reward R(s, a) */
	public double getReward(int s, int a){
		return reward[s][a];
	}

	/** @return the latest computed value of state {@code s} */
	public double getValueState(int s){
		return this.vNew.get(s);
	}

	/**
	 * @return the last Bellman residual computed by {@link #valueIteration()}
	 *         (name kept for compatibility despite the inconsistent casing)
	 */
	public double getMaxvalue(){
		return (maxValue);
	}

	/** @return the convergence threshold */
	public double getEpsilon(){
		return this.epsilon;
	}

	/**
	 * Formats a policy as {@code "(a0 a1 ... )"}.
	 *
	 * @param policy action index per state
	 * @return the formatted string
	 */
	public String showPolicy(int[] policy){
		StringBuilder chaine = new StringBuilder();
		chaine.append("(");
		for (int i = 0; i < policy.length; i ++) {
			chaine.append(policy[i]+ " ");
		}
		chaine.append(")");
		return chaine.toString();
	}

}