package lip6.ivi;
import gurobi.*;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Random;
import java.util.Scanner;



/**
 * Ordinal-reward MDP solved by Interactive Value Iteration (IVI): state values
 * are kept as one vector per reward level, and Q-vector comparisons are settled
 * by Pareto dominance, a Gurobi K-dominance LP, or (as a last resort) a
 * simulated user query that refines the polytope K of admissible utilities.
 */
public class MdpInteractive{
	protected int nState;                // number of states
	protected int nAction;               // number of actions
	protected Matrix valueOldState;      // V_{t-1}: one row per state, one column per reward level
	protected Matrix vNew;               // V_t: value matrix of the current iteration
	protected double epsilon;            // convergence threshold on the value change
	protected double gamma;              // discount factor
	protected double [][][] qValue;      // Q-values indexed [state][action][reward level]
	protected int [] policy;             // best action per state (result of the last run)
	protected int nReward;               // number of reward levels in the ordinal scale
	protected int nbQuery = 0;           // questions asked to the (simulated) user
	protected Vector rewardSet;          // reward scale; also used as utility weights by query()
	protected float eta = 1;             // strict-inequality margin used in the K-dominance LPs
	protected double[][][] reward;       // reward indicator vectors indexed [state][action][reward level]
	protected double [][][] transition;  // transition probabilities indexed [state][action][next state]
	protected int nbrKdominate = 0;      // comparisons settled by K-dominance (LP)
	protected int nbrParetoDominate = 0; // comparisons settled by Pareto dominance
	protected int nbrIteration = 0;      // iterations until convergence of the last run
	protected static Random rnd;         // shared RNG, seeded in main()


	/**
	 * Entry point: reads the MDP dimensions, gamma, epsilon and an RNG seed from
	 * stdin, then solves 100 random instances with interactive value iteration,
	 * writing one CSV record per run to "resultIvi.csv" and a summary to stdout.
	 *
	 * Fixes: Scanner and BufferedWriter are now opened in try-with-resources so
	 * both are always closed, and a failed FileWriter can no longer leave
	 * {@code buffer} null — the old code caught the IOException, printed it, and
	 * then dereferenced the null writer.
	 */
	public static void main(String[] args) throws GRBException, IOException {
		try (Scanner input = new Scanner(System.in);
		     BufferedWriter buffer = new BufferedWriter(new FileWriter("resultIvi.csv"))) {
			System.out.println("#States");
			int nStates = Integer.parseInt(input.next());
			System.out.println("#Actions");
			int nActions = Integer.parseInt(input.next());
			System.out.println("#Rewards");
			int nbRewards = Integer.parseInt(input.next());
			System.out.println("Gamma");
			double gamma = Double.parseDouble(input.next());
			System.out.println("Epsilon");
			double epsi = Double.parseDouble(input.next());
			System.out.println("#Seed of Random");
			rnd = new Random(Integer.parseInt(input.next()));
			int [] policy = new int [nStates];
			int nTest = 100;
			for (int t = 1; t <= nTest; t++) {
				MdpInteractive mdpIvi = new MdpInteractive(nStates, nActions, gamma, epsi, nbRewards);
				System.out.println(mdpIvi.toString(false));
				policy = mdpIvi.interactiveValueIteration();
				StringBuffer chaine = new StringBuffer();
				chaine.append(mdpIvi.showValueStates()+"\n");
				chaine.append("Policy  = (");
				for (int i = 0; i < nStates; i++) {
					chaine.append(policy[i]+" ");
				}
				// one CSV record per test run, flushed immediately so partial runs are kept
				buffer.write("Test " +t+ "; # Query => ;" +mdpIvi.getNbQuery()+"; Policy => ;" +mdpIvi.showPolicy(policy));
				buffer.newLine();
				buffer.flush();
				chaine.append(") \n");
				chaine.append("#Iteration = "+ mdpIvi.getNbIteration()+ "\n");
				chaine.append("#Query = " +mdpIvi.getNbQuery()+"\n");
				chaine.append("#Kdominate Succeeded = " +mdpIvi.getnbkd()+"\n");
				chaine.append("#ParetoDominate Succeeded = "+mdpIvi.getnbpD()+"\n");
				System.out.println(chaine.toString());
			}
		}
	}

	/**
	 * Builds an MDP with a caller-supplied transition function and randomly
	 * drawn ordinal rewards: each (state, action) is assigned one level of the
	 * scale uniformly at random and stored as an indicator vector in reward[][][].
	 *
	 * @param nStates    number of states
	 * @param nActions   number of actions
	 * @param gamma      discount factor
	 * @param epsilon    convergence threshold
	 * @param nbrewards  number of levels in the ordinal reward scale
	 * @param transition transition probabilities [state][action][next state] (stored, not copied)
	 */
	public MdpInteractive(int nStates, int nActions, double gamma, double epsilon, int nbrewards , double [][][] transition){
		this.nState = nStates;
		this.nAction = nActions;
		this.gamma = gamma;
		this.epsilon = epsilon;
		this.transition = transition;
		this.nReward = nbrewards;
		rewardSet = new Vector(nReward);
		// fills the scale with nReward values using the shared RNG (project Vector API)
		this.rewardSet.computeScale(nReward, rnd);
		valueOldState = new Matrix(nStates, nReward);
		vNew = new Matrix(nStates, nReward);
		policy = new int [this.nState];
		qValue = new double [nStates][nActions][nReward];	
		this.reward = new double[nStates][nActions][nReward];
		// draw one scale value per (state, action)
		Matrix r = new Matrix(nStates, nActions);
		for(int i = 0; i < nStates; i ++){
			for (int j = 0; j < nActions; j ++){
				r.setValue(i, j, rewardSet.get(rnd.nextInt(nReward)));
			}
		}
		// encode each drawn value as an indicator over the scale levels.
		// NOTE(review): relies on exact floating-point equality between the stored
		// value and rewardSet.get(k) — holds only because the value was copied
		// unchanged from the scale; verify Matrix/Vector return primitives.
		for (int i = 0; i < nStates; i ++) {
			for (int j = 0; j < nActions; j ++) {
				for (int k = 0; k < nReward; k ++) {
					if (r.getValue(i,j) == rewardSet.get(k)) {
						this.reward[i][j][k] = 1;
					}
				}
			}
		}
	}

	/**
	 * Builds an MDP from a fully specified instance: transition function,
	 * reward scale and a scalar reward per (state, action), which is converted
	 * to an indicator vector over the scale levels.
	 *
	 * @param nState     number of states
	 * @param nAction    number of actions
	 * @param gamma      discount factor
	 * @param epsilon    convergence threshold
	 * @param scale      ordinal reward scale (stored, not copied)
	 * @param transition transition probabilities [state][action][next state] (stored, not copied)
	 * @param reward     scalar reward per (state, action); each value is expected
	 *                   to occur in {@code scale}
	 */
	public MdpInteractive(int nState, int nAction, double gamma, double epsilon, Vector scale, double [][][] transition, double [][] reward){
		this.nState = nState;
		this.nAction = nAction;
		this.gamma = gamma;
		this.epsilon = epsilon;
		this.transition = transition;
		this.nReward = scale.size();
		rewardSet = scale;
		valueOldState = new Matrix(nState, nReward);
		vNew = new Matrix(nState, nReward);
		policy = new int [this.nState];
		qValue = new double [nState][nAction][nReward];	
		this.reward = new double[nState][nAction][nReward];
		// NOTE(review): exact == comparison of doubles — a reward[i][j] that is
		// not bit-identical to a scale entry silently yields an all-zero
		// indicator row; confirm callers pass values taken from the scale.
		for (int i = 0; i < nState; i ++) {
			for (int j = 0; j < nAction; j ++) {
				for (int k = 0; k < nReward; k ++) {
					if (reward[i][j] == rewardSet.get(k)) {
						this.reward[i][j][k] = 1;
					}
				}
			}
		}
	}

	// Construit une instance aléatoire de MDP
	/**
	 * Builds a fully random MDP instance: transition probabilities are drawn
	 * uniformly and normalized per (state, action), and each (state, action)
	 * receives one random level of the ordinal reward scale.
	 *
	 * @param nStates  number of states
	 * @param nActions number of actions
	 * @param gamma    discount factor
	 * @param epsilon  convergence threshold
	 * @param nRewards number of levels in the ordinal reward scale
	 */
	public MdpInteractive(int nStates, int nActions, double gamma, double epsilon, int nRewards){
		this.nState = nStates;
		this.nAction = nActions;
		this.gamma = gamma;
		this.epsilon = epsilon;
		this.nReward = nRewards;
		// random transition kernel: draw uniform weights, then normalize each
		// (state, action) row so it sums to 1
		transition = new double[nStates][nActions][nStates];
		for (int i = 0; i < nStates; i ++) {
			for (int j = 0; j < nActions; j ++) {
				double sum = 0.0;
				for (int k = 0; k < nStates; k ++){
					transition[i][j][k] = rnd.nextDouble();
					sum += transition[i][j][k];
				}
				for (int l = 0; l < nStates; l ++) {
					transition[i][j][l] /= sum;
				}  
			}  
		}
		rewardSet = new Vector(nReward);
		// fills the scale with nReward values using the shared RNG (project Vector API)
		this.rewardSet.computeScale(nReward, rnd);
		valueOldState = new Matrix(nStates, nReward);
		vNew = new Matrix(nStates, nReward);
		policy = new int [this.nState];
		qValue = new double [nStates][nActions][nReward];	
		this.reward = new double[nStates][nActions][nReward];
		// draw one scale value per (state, action)
		Matrix r = new Matrix(nStates, nActions);
		for (int i = 0; i < nStates; i ++) {
			for (int j = 0; j < nActions; j ++) {
				r.setValue(i, j, rewardSet.get(rnd.nextInt(nReward)));
			}
		}
		// encode each drawn value as an indicator over the scale levels.
		// NOTE(review): exact == double comparison; safe only because the value
		// was copied unchanged from the scale — verify if computeScale can
		// produce duplicate values (would set several indicator bits).
		for (int i = 0; i < nStates; i ++) {
			for (int j = 0; j < nActions; j ++) {
				for (int k = 0; k < nReward; k ++) {
					if (r.getValue(i,j) == rewardSet.get(k)) {
						this.reward[i][j][k]=1;
					}
				}
			}
		}
	}

	/** Prints the stored initial value matrix V0 (valueOldState) to stdout, one "Vo(Si) = (...)" group per state. */
	public void initValueStates(){ 
		StringBuilder out = new StringBuilder();
		out.append("************************INIT OF VALUE STATES *******************************\n");
		for (int s = 0; s < nState; s++) {
			out.append("Vo(S" + s + ") = (");
			for (int w = 0; w < nReward; w++) {
				out.append(valueOldState.getValue(s, w)).append(" ");
			}
			out.append(") ; ");
		}
		System.out.println(out.toString());
	}

	/**
	 * Builds the initial polytope K over utility vectors: one coefficient row
	 * per consecutive pair of reward levels, with -1 at position i and +1 at
	 * position i+1 (i.e. the ordinal constraint u_{i+1} - u_i on the scale).
	 *
	 * @return the list of nReward-1 constraint coefficient rows
	 */
	public ArrayList<double []> init(){
		ArrayList<double[]> polytope = new ArrayList<double[]>();
		for (int row = 0; row < nReward - 1; row++) {
			double[] coeffs = new double[this.nReward];
			coeffs[row] = -1;
			coeffs[row + 1] = 1;
			polytope.add(row, coeffs);
		}
		return polytope;
	}

	/** Renders the polytope as text: a header line, then one row of coefficients per constraint. */
	public String showPolytope(ArrayList<double []> p){
		StringBuilder sb = new StringBuilder("INIT OF POLYTOPE \n");
		for (double[] row : p) {
			for (double coeff : row) {
				sb.append(coeff).append(" ");
			}
			sb.append("\n");
		}
		return sb.toString();
	} 

	/** Formats a policy as "(a0 a1 ... )". */
	public String showPolicy(int [] policy){
		StringBuilder sb = new StringBuilder("(");
		for (int action : policy) {
			sb.append(action).append(" ");
		}
		return sb.append(")").toString();
	}

	/**
	 * Builds a textual description of the MDP.
	 *
	 * @param verbose when true, also dumps the transition matrix, the reward
	 *                scale and the per-(state, action) reward vectors; when
	 *                false only the one-line header is produced.
	 */
	public String toString(boolean verbose){
		StringBuilder sb = new StringBuilder();
		sb.append("ORDINAL REWARD MDP *******  STATES = " +this.nState+  ",  ACTIONS = " +this.nAction+ ", NUMBER OF REWARDS = " +this.nReward + ", GAMMA = "+ this.gamma+ ", EPSILON = " +this.epsilon +"\n");
		if (verbose) {
			sb.append( " ******************************** TRANSITION MATRIX ************************************ \n");
			for (int s = 0; s < this.nState; s++) {
				sb.append(" etat =" + s);
				for (int a = 0; a < this.nAction; a++) {
					sb.append(" action =" + a);
					for (int next = 0; next < nState; next++) {
						sb.append(", etat' =" + next + "-->" + transition[s][a][next]);
					}
				}
				sb.append("\n");
			}
			sb.append("**************************Scale of Rewards*************************\n");
			sb.append(this.rewardSet.toString() + "\n");
			sb.append( " ********************************* REWARDS *********************************************\n");
			for (int s = 0; s < this.nState; s++) {
				sb.append(" etat = " + s);
				for (int a = 0; a < nAction; a++) {
					sb.append(", action =" + a);
					sb.append(", Reward = (");
					for (int w = 0; w < nReward; w++) {
						sb.append(" " + reward[s][a][w]);
					}
					sb.append(")");
				}
				sb.append("\n");
			}
		}
		return sb.toString(); 
	}

	/**
	 * Runs interactive value iteration until the value change passes testStop.
	 * Each sweep performs a Bellman backup per (state, action) on all reward
	 * levels, then selects the best Q-vector via getBest — which may Pareto
	 * compare, solve a K-dominance LP, or query the user and refine K.
	 *
	 * @return the greedy policy found at convergence (also stored in this.policy)
	 * @throws GRBException propagated from the Gurobi LPs inside getBest
	 */
	public int [] interactiveValueIteration() throws GRBException{
		int t = 0;
		// K: current polytope of admissible utility vectors (ordinal constraints only at start)
		ArrayList<double []> k = this.init();
		boolean stop = false;
		boolean result = false;
		Vector vTempState = new  Vector(nState);
		do {
			t=t+1;
			for(int i = 0; i < nState; i ++) {
				Vector best = new Vector(nReward);  // best Q-vector found so far for state i
				Vector vt = new Vector(nReward);
				// sentinel; overwritten as soon as an action's Q-vector equals `best`
				int bestAction= Integer.MIN_VALUE;
				for (int j = 0; j < nAction; j ++) { 
					Vector qValueTempStates = new Vector(nReward);
					// Bellman backup per reward level: Q(i,j,w) = r(i,j,w) + gamma * sum_e T(i,j,e) * V(e,w)
					for (int w = 0; w < nReward; w ++){ 
						double tmps = 0.0;
						for (int e = 0; e < nState; e ++){ 
							tmps += transition[i][j][e] * valueOldState.getValue(e,w);
						}
						qValue[i][j][w] = reward[i][j][w] + gamma * tmps;
						qValueTempStates.add(w, qValue[i][j][w]);
					}
					// getBest may ask a user query and add a constraint to K as a side effect
					vt = getBest(best, qValueTempStates, k);
					best = (Vector) vt.clone();
					// NOTE(review): relies on Vector.equals to detect "this action achieved
					// the best vector" — confirm it is element-wise equality
					if(qValueTempStates.equals(best)){bestAction=j;}
				}
				vNew.updateMatrix(best, i);
				policy[i]=bestAction;
			}

			// per-state max |Vnew - Vold|, then convergence test against epsilon
			vTempState = computeDistance(vNew,valueOldState);
			result = testStop(vTempState, epsilon);
			if (result) {
				stop = true;
				this.nbrIteration=t;
			} else {
				valueOldState.updateValueOldStates(vNew);
			}
			// lightweight progress trace every 5 sweeps
			if (t%5==0){ System.out.println("Step = "+t+"\t max Error = "+vTempState.getMaxValue());}
		} while (!stop);
		return (policy);
	}

	/**
	 * Row-wise distance between two same-shaped matrices: entry i of the result
	 * is max over columns j of |m1[i][j] - m2[i][j]|.
	 */
	public Vector computeDistance(Matrix m1, Matrix m2){
		int nRows = m1.getNumberOfRows();
		int nCols = m1.getNumberOfColumns();
		Vector rowGaps = new Vector(nRows);
		for (int r = 0; r < nRows; r++) {
			Vector gaps = new Vector(nCols);
			for (int c = 0; c < nCols; c++) {
				double diff = Math.abs(m1.getValue(r, c) - m2.getValue(r, c));
				gaps.add(c, diff);
			}
			rowGaps.add(r, gaps.getMaxValue());
		}
		return rowGaps;
	}

	/** Renders the final value matrix vNew, one "V(i) = (...)" group per state. */
	public String showValueStates(){
		StringBuilder sb = new StringBuilder();
		sb.append("*******************************LAST VALUE STATES***************************************\n");
		int rows = this.vNew.getNumberOfRows();
		int cols = this.vNew.getNumberOfColumns();
		for (int s = 0; s < rows; s++) {
			sb.append(" V(" + s + ") = (");
			for (int w = 0; w < cols; w++) {
				sb.append(vNew.getValue(s, w)).append(" ");
			}
			sb.append(") , ");
		}
		return sb.toString();
	}

	/**
	 * Convergence test for value iteration.
	 *
	 * Bug fix: the previous version returned true as soon as ANY single entry
	 * of vT was within epsilon, which stopped the iteration after the first
	 * state converged instead of waiting for all of them.
	 *
	 * @param vT      per-state maximum error between successive value matrices
	 * @param epsilon convergence threshold
	 * @return true iff every state's error is less than or equal to epsilon
	 */
	public boolean testStop(Vector vT, double epsilon){
		for (int i = 0; i < vT.size(); i ++){
			if (vT.get(i) > epsilon) {
				return false; // at least one state is still above tolerance
			}
		}
		return  true;
	}

	/** Debug helper: prints v1, then the label s, then v2, each on its own line. */
	public void show(Vector v1, Vector v2, String s){
		String[] lines = { v1.toString(), s, v2.toString() };
		for (String line : lines) {
			System.out.println(line);
		}
	}

	/**
	 * Looks for a row of m whose vector beats every row of m without asking the
	 * user (Pareto dominance or K-dominance only).
	 *
	 * @param m matrix whose rows are the candidate value vectors
	 * @param k current polytope of admissible utility vectors
	 * @return the index of the last such dominant row, or -2 when none exists
	 * @throws GRBException propagated from the K-dominance LPs
	 */
	public int computeBestAction(Matrix m, ArrayList<double[]> k) throws GRBException{
		int item  = -2;   // sentinel: no query-free dominant row found
		int count = 0;  
		for (int i = 0; i < m.numberOfRows; i ++) {
			Vector v = m.getRow(i);   
			for (int j = 0; j < m.numberOfRows; j ++){
				Vector v2 = m.getRow(j);
				// NOTE(review): j ranges over ALL rows including j == i, so reaching
				// count == numberOfRows requires the self-comparison to return 1 —
				// confirm getBestWithoutQuery(v, v, k) does so (v.compare(v) true?)
				int result = getBestWithoutQuery(v, v2, k);
				if(result == 1) count++;
			}
			if(count == m.numberOfRows){
				item = i;
			}
			count = 0; // reset the tally before trying the next candidate row
		}
		return item;
	}

	/**
	 * Compares v1 and v2 without ever querying the user. Checks run in this
	 * order (side effects on the counters preserved): Pareto v1 over v2,
	 * Pareto v2 over v1, K-dominance of v1 (sens +1), K-dominance of v2 (sens -1).
	 *
	 * @return 1 if v1 wins, -1 if v2 wins, 0 when the pair is incomparable
	 *         without a query
	 * @throws GRBException propagated from the K-dominance LPs
	 */
	public int getBestWithoutQuery(Vector v1, Vector v2, ArrayList<double[]> k) throws GRBException{
		if (v1.compare(v2)) {
			this.nbrParetoDominate++;
			return 1;
		}
		if (v2.compare(v1)) {
			this.nbrParetoDominate++;
			return -1;
		}
		if (kDominate(v1, v2, k, eta, 1)) {
			return 1;
		}
		if (kDominate(v1, v2, k, eta, -1)) {
			return -1;
		}
		return 0;
	}

	/**
	 * Returns the preferred of two value vectors, escalating through cheaper
	 * tests first: empty-vector shortcuts, Pareto dominance (both directions),
	 * K-dominance LPs (both directions), and finally a user query — in which
	 * case the answer is also recorded as a new constraint in K.
	 *
	 * @param v1 first candidate (may be empty on the first action of a sweep)
	 * @param v2 second candidate
	 * @param k  polytope of admissible utilities; mutated by updateK on a query
	 * @return the winning vector (a clone when decided by query, the original
	 *         reference otherwise)
	 * @throws GRBException propagated from the K-dominance LPs
	 */
	public Vector getBest(Vector v1, Vector v2, ArrayList<double []> k) throws GRBException{
		boolean result = false;
		// empty vector means "no candidate yet": the other one wins by default
		if (v1.size() == 0){return v2;}
		if (v2.size() == 0){return v1;}
		result = v1.compare(v2);
		if (result) { 
			this.nbrParetoDominate++;
			return v1;
		} else {
			result = v2.compare(v1);
			if (result) {
				this.nbrParetoDominate++;
				return v2;
			} else {  
				// Pareto-incomparable: try the K-dominance LP in both directions
				result = kDominate(v1,v2,k,eta,1);  
				if(result) {
					return v1;
				} else {
					result = kDominate(v1,v2,k,eta,-1);
					if (result) {
						return v2;
					} else { 
						// still undecided: ask the (simulated) user and remember the answer in K
						boolean resultQuery = query(v1,v2); 
						if (resultQuery) {
							this.updateK(v1, v2, k);
							return v1.clone();
						} else {
							this.updateK(v2, v1, k);
							return v2.clone();
						}
					}
				}
			}
		}
	} 

	/*	private boolean redundant(double[] t, ArrayList<double[]> k) throws GRBException {
		boolean result = false;
		GRBEnv env = new GRBEnv(); 
		GRBModel model = new GRBModel(env);
		model.set(GRB.StringAttr.ModelName, "LP.Redundant");
		GRBVar[] varDecision = new GRBVar[nElements];
		for (int i=0; i<nElements; i++){
			varDecision[i] = model.addVar(0,5,t[i], GRB.CONTINUOUS, "reward"+i);
		}
		// sens de l'objectif minimisation (+1) maximisation (-1)
		model.set(GRB.IntAttr.ModelSense, 1);		
		model.update();
		for(int p=0; p<k.size(); p++){
			GRBLinExpr leftHandSide = new GRBLinExpr();
			for(int w=0; w<k.get(0).length; w++){   
				leftHandSide.addTerm(k.get(p)[w],varDecision[w]);
			}
			if(p<this.nElements-1) {
				model.addConstr(leftHandSide , GRB.GREATER_EQUAL, eta , "constraint"+p);
			}
			else{ 
				model.addConstr(leftHandSide , GRB.GREATER_EQUAL, 0 , "constraint"+p);
			}
		}
		model.getEnv().set(GRB.IntParam.Method ,GRB.METHOD_CONCURRENT);
		model.optimize();
		double objectiveValue = model.get(GRB.DoubleAttr.ObjVal);
		if (objectiveValue>0)
		{	
			result=true; 
		}
		return result;
	}*/

	/**
	 * Tests K-dominance between v1 and v2 by optimizing (v1 - v2) . r over the
	 * utility vectors r admissible under the polytope K (the first nReward-1
	 * constraints carry the strict eta margin, later learned constraints are >= 0).
	 *
	 * Fix: the Gurobi model and environment are now released in a finally
	 * block, so native resources are no longer leaked when optimize() or the
	 * ObjVal read throws (e.g. on an infeasible model).
	 *
	 * @param sens +1 minimizes (v1 dominates if the minimum is >= 0),
	 *             -1 maximizes (v2 dominates if the maximum is <= 0)
	 * @return true when the tested direction of dominance holds
	 * @throws GRBException on any Gurobi failure
	 */
	public boolean kDominate(Vector v1, Vector v2, ArrayList<double []> k, double eta, int sens) throws GRBException{
		GRBEnv env = new GRBEnv(); 
		GRBModel model = null;
		try {
			env.set(GRB.IntParam.LogToConsole, 0);
			env.set(GRB.IntParam.OutputFlag, 0);
			env.set(GRB.StringParam.LogFile, "");
			model = new GRBModel(env);
			model.set(GRB.StringAttr.ModelName, "LP.IVI");
			// one decision variable per reward level; objective coefficient is the
			// value difference on that level, bounds [0, top of the reward scale]
			GRBVar[] varDecision = new GRBVar[nReward];
			for (int i = 0; i < nReward; i ++) {
				varDecision[i] = model.addVar(0, this.rewardSet.get(this.nReward-1),  v1.get(i) - v2.get(i), GRB.CONTINUOUS, "reward"+i);
			}
			model.set(GRB.IntAttr.ModelSense, sens);
			model.update();		
			for (int p = 0; p < k.size(); p ++) {   
				GRBLinExpr leftHandSide = new GRBLinExpr();   
				for (int w = 0; w < k.get(0).length; w ++) {   
					leftHandSide.addTerm(k.get(p)[w], varDecision[w]);
				}
				// the initial ordinal constraints get the strict eta margin;
				// constraints learned from queries only need to be non-negative
				if (p < this.nReward-1) {
					model.addConstr(leftHandSide, GRB.GREATER_EQUAL, eta, "constraint"+p);
				} else {
					model.addConstr(leftHandSide, GRB.GREATER_EQUAL, 0, "constraint"+p);
				}
			}
			model.getEnv().set(GRB.IntParam.Method , GRB.METHOD_CONCURRENT);
			model.optimize();
			double objectiveValue = model.get(GRB.DoubleAttr.ObjVal);
			if (sens == 1 && objectiveValue >= 0) { 
				this.nbrKdominate++;
				return true;
			}
			if (sens == -1 && objectiveValue <= 0) {
				this.nbrKdominate++;
				return true;
			}
			return false;
		} finally {
			// always free the native Gurobi resources, even on exception
			if (model != null) { model.dispose(); }
			env.dispose();
		}
	}

	// mets à jour le polytope K avec l'inégalité (v1-v2).r >= 0
	public void updateK(Vector v1, Vector v2, ArrayList<double[]> k){
		double [] constraint = new double [this.nReward];		
		for (int i = 0; i < v1.size(); i ++){
			constraint[i] = v1.get(i) - v2.get(i);
		}
		k.add(constraint); 
	}

	/**
	 * Simulated user query: compares v1 and v2 under the weights stored in
	 * rewardSet and increments the query counter.
	 *
	 * @return true iff v1 is weakly preferred to v2 ((v1 - v2) . rewardSet >= 0)
	 */
	public boolean query(Vector v1, Vector v2){ 
		this.nbQuery++;  
		double dot = 0.0;
		for (int w = 0; w < v1.size(); w++) {
			dot += (v1.get(w) - v2.get(w)) * this.rewardSet.get(w);
		}
		return dot >= 0;
	}

	/** @return the number of states */
	public int getNstate(){
		return nState;
	}

	/** @return the number of actions */
	public int getNaction(){
		return nAction;
	}

	/** @return the number of reward levels */
	public int getNelement(){
		return nReward;
	}

	/** @return the convergence threshold */
	public double getEpsilon(){
		return epsilon;
	}

	/** @return the discount factor */
	public double getGamma(){
		return gamma;
	}

	/** @return the transition probability from stat1 to stat2 under action */
	public double getTransition(int stat1, int action, int stat2){
		return transition[stat1][action][stat2];
	}

	/** @return the reward entry for (state, action) at level goal */
	public double getReward(int state, int action, int goal){
		return reward[state][action][goal];
	}

	/** @return the current value vector of state s (row s of vNew) */
	public Vector getValueState(int s){
		return this.vNew.getRow(s);
	}

	/** @return the strict-inequality margin used in the K-dominance LPs */
	public float getEta(){
		return this.eta;
	}

	/** @return the action chosen for state s (policy is int[]; no cast needed) */
	public int getPolicy(int s){
		return this.policy[s];
	}

	/** @return the number of user queries asked so far */
	public int getNbQuery(){
		return this.nbQuery;
	}

	/** @return how many comparisons were settled by K-dominance */
	public int getnbkd(){
		return this.nbrKdominate;
	}

	/** @return how many comparisons were settled by Pareto dominance */
	public int getnbpD(){
		return this.nbrParetoDominate;
	}

	/** @return the number of iterations of the last solve */
	public int getNbIteration(){
		return this.nbrIteration;
	}

	/** Overwrites reward level i of the scale with the integer value o. */
	public void setRewardSet(int i, int o){
		double value = o;
		rewardSet.set(i, value);
	}

	/** Inserts the integer value e at position i of the reward scale. */
	public void addElementToRewardSet(int i, int e){
		double value = e;
		rewardSet.add(i, value);
	} 

	/**
	 * Appends {@code size} zero entries to the reward scale.
	 *
	 * Bug fix: the old loop bound {@code size + rewardSet.size()} was
	 * re-evaluated on every iteration while the loop itself grew the vector,
	 * so with insert-style add() the loop never terminated. The starting size
	 * is now captured once; with set-style add() the behavior is unchanged.
	 */
	public void initRewardSet(int size){
		int start = rewardSet.size();
		for (int i = start; i < start + size; i ++){
			rewardSet.add(i, 0.0);
		}
	}
	/** Overrides the number of reward levels used by the solver. */
	public void setNelement(int nbr){
		this.nReward = nbr;
	}

	/** Sets the strict-inequality margin used in the K-dominance LPs. */
	public void setEta(float value){
		this.eta = value;
	}
}

