package com.ecust.ml.cardriving.ivl;

import java.io.IOException;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;

import Jama.Matrix;

import com.ecust.ml.cardriving.model.Car;
import com.ecust.ml.libsvm.Main;
import com.fly.lib.util.FileUtil;
import com.fly.lib.util.JsonUtil;

/**
 * Inverse reinforcement learning algorithm: recovers a reward weight vector
 * from expert lane-driving trajectories and derives driving policies from it
 * (apprenticeship learning via feature-expectation matching).
 * @author pleasebugme
 *
 */
public class InverseReinfocementLearning {

	// Discount factor for feature-expectation weighting
	// (currently unused: discounting is commented out in computeOptimalFeatureExpectation).
	public static final float LAMDA = 0.99f;
	// Length of the state feature vector (15x1): 5 lane one-hots + 10 distance one-hots.
	public static final int FEATURE_LENGTH = 15;
	// Lane the car starts on.
	public static final int LANEON_INIT = 3;
	// Iteration cap shared by the outer IRL loop and the inner RL value iteration.
	public static final int ITERATOR_COUNT = 20;
	
	// IRL termination threshold epsilon: stop once t(i) <= e.
	public static final float e = 0.01f;
	
	// Max per-element difference for value iteration to count as converged.
	public static final float RL_MAX_ERROR = 0.05f;
	
	// Expert trajectory data, one row per time step: [laneOn, d2, d3, d4].
	private Matrix dataMatrix;
	
	// Policies produced across iterations; the last entry is the current best.
	private List<Policy> policyList = new ArrayList<Policy>();
	

	/**
	 * Loads the expert trajectory data from disk and runs the libsvm-based
	 * IRL computation.  (The pure-projection variant compute() and the
	 * optimal() helper are kept but currently disabled.)
	 */
	public InverseReinfocementLearning() {
		loadData();
		//compute();
		try {
			computeByLibSVM();
		} catch (IOException ex) {
			// BUGFIX: renamed the catch variable from "e", which shadowed the
			// class-level epsilon constant "e" — an accident waiting to happen.
			// TODO: log or propagate instead of just printing the stack trace.
			ex.printStackTrace();
		}
		
		// optimal();
	}
	
	/**
	 * Builds a policy from a fixed, previously-computed weight vector and
	 * appends it to the policy list (diagnostic helper, currently unused).
	 */
	private void optimal() {
		double[][] precomputedWeight = new double[][]{
				{-0.0439}, {0.0078}, {0.0078}, {0.0318}, {-0.0035},
				{0.032}, {0.024}, {0.016}, {0.008}, {0.0001},
				{-0.007}, {-0.014}, {-0.0767}, {0.012}, {0.034}};
		Weight fixedWeight = new Weight(new Matrix(precomputedWeight));
		policyList.add(this.computePolicyByRLAlgorithm(fixedWeight));
	}
	
	/**
	 * Runs the pure-projection IRL computation on the supplied trajectory data.
	 * @param dataMatrix expert trajectory data, one row per step: [laneOn, d2, d3, d4]
	 */
	public InverseReinfocementLearning(Matrix dataMatrix) {
		this.dataMatrix = dataMatrix;
		compute();
	}
	
	/**
	 * Restores a previously-saved weight vector (JSON-encoded double[][]) from
	 * a file and computes the corresponding policy directly, skipping training.
	 * @param fileName path to the UTF-8 JSON weight file
	 */
	public InverseReinfocementLearning(String fileName) {
		String weightData = FileUtil.readToString(fileName, "utf-8");
		double[][] d = JsonUtil.toBean(weightData, double[][].class);
		Matrix weightMatrix = new Matrix(d);
		Weight weight = new Weight(weightMatrix);
		Policy policy = computePolicyByRLAlgorithm(weight);
		this.policyList.add(policy);
		//compute();
	}
	
	/**
	 * Looks up the action (target lane) prescribed by the most recently
	 * computed policy for the given state.
	 * @param laneOn lane currently occupied
	 * @param distanceOf2 closest-car distance on lane 2
	 * @param distanceOf3 closest-car distance on lane 3
	 * @param distanceOf4 closest-car distance on lane 4
	 * @return lane to drive on next (1~4)
	 */
	public int getActionByStateMatrix(int laneOn, int distanceOf2, int distanceOf3, int distanceOf4) {
		State currentState = new State(laneOn, distanceOf2, distanceOf3, distanceOf4);
		Policy latestPolicy = policyList.get(policyList.size() - 1);
		// State indexes run 1000..5999 while the policy array is 0-based, hence -1000.
		return getActionByPolicy(latestPolicy, getStateIndexByState(currentState) - 1000);
	}
	
	/**
	 * Loads the expert trajectory file into {@link #dataMatrix}.  Each line is
	 * a comma-separated record of four integers: laneOn, d2, d3, d4.
	 */
	private void loadData() {
		// TODO: the data path is hard-coded to the author's machine.
		String content = FileUtil.readToString("/Users/pleasebugme/Desktop/nasty130", "utf-8");
		String[] lines = content.split("\n");
		double[][] dataArray = new double[lines.length][4];
		for (int i = 0; i < lines.length; i++) {
			String[] datas = lines[i].split(",");
			// Guard against over-long records instead of risking ArrayIndexOutOfBounds.
			int columns = Math.min(datas.length, 4);
			for (int j = 0; j < columns; j++) {
				// parseInt avoids the pointless Integer boxing of Integer.valueOf;
				// trim() tolerates stray whitespace and Windows "\r\n" line endings.
				dataArray[i][j] = Integer.parseInt(datas[j].trim());
			}
		}
		dataMatrix = new Matrix(dataArray);
	}
	
	/**
	 * Computes the feature expectation of a policy by replaying the recorded
	 * distance trajectory while letting the policy choose the lane at each step.
	 * @param policy policy to evaluate (5000x1 action matrix)
	 * @return feature expectation, 15x1
	 */
	public FeatureExpectation computeFeatureExpectation(Policy policy) {
		
		// Columns 1..3 hold the per-lane distances; column 0 is the recorded lane.
		Matrix trajectoryDataMatrix = getMatrixColumn(dataMatrix, 1, 3);
		
		Matrix featureExpectation = new Matrix(FEATURE_LENGTH, 1);
		int laneOn = (int) dataMatrix.get(0, 0);
		int trajectoryDataSize = trajectoryDataMatrix.getRowDimension();
		for(int i=0; i<trajectoryDataSize; i++) {
			
			// Assemble the 4x1 state vector [laneOn; d2; d3; d4] from the
			// policy-chosen lane and the recorded distances.
			Matrix stateMatrix = new Matrix(4, 1);
			stateMatrix.setMatrix(0, 0, 0, 0, new Matrix(1, 1, laneOn));
			stateMatrix.setMatrix(1, 3, 0, 0, getMatrixRow(trajectoryDataMatrix, i).transpose());
			// Accumulate this step's feature vector.
			State state = new State((int) stateMatrix.get(0, 0), (int) stateMatrix.get(1, 0), (int) stateMatrix.get(2, 0), (int) stateMatrix.get(3, 0));
			Feature feature = getFeatureByState(state);
			featureExpectation.plusEquals(feature.getFeatureMatrix());
			
			// The policy decides which lane we occupy at the next step.
			laneOn = getActionByPolicy(policy, getStateIndexByState(state)-1000);
		}
		
		// Average over the number of training samples.
		return new FeatureExpectation(featureExpectation.arrayTimes(new Matrix(FEATURE_LENGTH, 1, 1.0/trajectoryDataSize)));
	}
	
	/**
	 * Reads the action stored in the policy's value matrix for a state.
	 * @param policy policy to query
	 * @param stateIndex zero-based row index (packed state index minus 1000)
	 * @return action (target lane) for that state
	 */
	private int getActionByPolicy(Policy policy, int stateIndex) {
		return (int) policy.getValueMatrix().get(stateIndex, 0);
	}
	
	/**
	 * Computes the expert's feature expectation directly from the recorded
	 * trajectory.
	 * NOTE(review): the inner loop runs to FEATURE_LENGTH-1, so the weight of
	 * the LAST feature component stays 0 and that component never contributes
	 * to the expectation — confirm whether this off-by-one is intentional.
	 * Also, the LAMDA discounting is commented out, making every weight 1.
	 * @return feature expectation, 15x1
	 */
	public FeatureExpectation computeOptimalFeatureExpectation() {
		
		Matrix featureExpectation = new Matrix(FEATURE_LENGTH, 1);
		
		int trajectoryDataSize = dataMatrix.getRowDimension();
		for(int i=0; i<trajectoryDataSize; i++) {
			// Per-component weights for this time step; see the Javadoc NOTE.
			double[][] lamdaArray = new double[FEATURE_LENGTH][1];	
			for(int j=0; j<FEATURE_LENGTH-1; j++) {
				if(j <=4) {
					// Lane one-hot features always weighted 1.
					lamdaArray[j][0] = 1;
				} else {
					// TODO discounting disabled; distance features also weighted 1.
					lamdaArray[j][0] = 1;//Math.pow(LAMDA,i);
				}
			}
			Matrix rowData = getMatrixRow(dataMatrix, i);
			State state = new State((int) rowData.get(0, 0), (int) rowData.get(0, 1), (int) rowData.get(0, 2), (int) rowData.get(0, 3));
			featureExpectation.plusEquals(getFeatureByState(state).getFeatureMatrix().arrayTimes(new Matrix(lamdaArray)));
		}
		
		// Average over the number of training samples.
		return new FeatureExpectation(featureExpectation.arrayTimes(new Matrix(FEATURE_LENGTH, 1, 1.0/trajectoryDataSize)));
	}
	
	/**
	 * Projection-variant apprenticeship learning: alternately computes a policy
	 * for the current weight vector and projects the expert feature expectation
	 * u(E) onto the span of the policies' expectations, until the residual t(i)
	 * drops below epsilon or ITERATOR_COUNT is reached.  Finally dumps all
	 * collected expectations as a libsvm-style model file.
	 */
	public void compute() {
		
		List<Weight> weightList = new ArrayList<Weight>();
		List<T> tList = new ArrayList<T>();
		List<FeatureExpectation> featureExpectationList = new ArrayList<FeatureExpectation>();
		List<FeatureExpectation> featureExpectationBarList = new ArrayList<FeatureExpectation>();
		
		// t(0), t(1) placeholders so list indexes line up with iteration numbers.
		tList.add(new T(0));
		tList.add(new T(0));
		
		// u(E): the expert's feature expectation.
		FeatureExpectation featureExpectationOptimal = this.computeOptimalFeatureExpectation();
		
		// policy(0) = random policy
		// TODO
		//Policy randomPolicy = new Policy();
		Policy randomPolicy = new Policy(this.dataMatrix.getArray());
		policyList.add(randomPolicy);
		
		// Initialize u(0).
		FeatureExpectation featureExpectation0 = computeFeatureExpectation(randomPolicy);
		featureExpectationList.add(featureExpectation0);
		
		// Initialize u(0)bar.
		featureExpectationBarList.add(featureExpectation0);
		
		// Initialize w(0), w(1).
		weightList.add(new Weight());
		weightList.add(new Weight(featureExpectationOptimal.getValue().minus(featureExpectation0.getValue())));
		//System.out.println("w(1)="+JsonUtil.toJsonString(weightList.get(1).getWeightMatrix().getArray()));
		
		// Compute policy(1).
		policyList.add(computePolicyByRLAlgorithm(weightList.get(1)));
		
		// Initialize u(1).
		featureExpectationList.add(computeFeatureExpectation(policyList.get(1)));
		
		for(int i=2; i<ITERATOR_COUNT; i++) {
			System.out.println("迭代开始："+i);
			// Orthogonal projection of u(E) onto the line through uBar(i-2) and u(i-1).
			Matrix u1Minus2bar = featureExpectationList.get(i-1).getValue().minus(featureExpectationBarList.get(i-2).getValue());
			Matrix uEMinus2bar = featureExpectationOptimal.getValue().minus(featureExpectationBarList.get(i-2).getValue());
			double param = u1Minus2bar.transpose().times(uEMinus2bar).get(0, 0) / (u1Minus2bar.transpose().times(u1Minus2bar)).get(0, 0);
			Matrix featureExpectationsBarIMinus1 = featureExpectationBarList.get(i-2).getValue().plus(u1Minus2bar.times(param));
			
			// set uBar(i-1)
			featureExpectationBarList.add(new FeatureExpectation(featureExpectationsBarIMinus1));
			
			// w(i)
			Matrix wi = featureExpectationOptimal.getValue().minus(featureExpectationsBarIMinus1);
			weightList.add(new Weight(wi));
			
			// t(i): distance between u(E) and the projected point.
			double ti = wi.norm2();
			tList.add(i, new T((float) ti));
			
			// Compute policy(i).
			//System.out.println(JsonUtil.toJsonString(weightList.get(i).getWeightMatrix().getArray()));
			Policy policyi = computePolicyByRLAlgorithm(weightList.get(i));
			policyList.add(i, policyi);
			
			// if t(i) <= e break
			if(ti <= e) {
				break;
			}
			
			// compute featureExpectations(i)
			featureExpectationList.add(computeFeatureExpectation(policyi));
		}
		
		//System.out.println("最终policy："+JsonUtil.toJsonString(policyList.get(policyList.size()-1).getValueMatrix().getArray()));
		System.out.println("最终weight："+JsonUtil.toJsonString(weightList.get(weightList.size()-1).getWeightMatrix().getArray()));
		
		System.out.println("u(E)="+JsonUtil.toJsonString(featureExpectationOptimal.getValue().getArray()));
		System.out.println("u()="+JsonUtil.toJsonString(computeFeatureExpectation(policyList.get(policyList.size()-1)).getValue().getArray()));
		
		// Dump the expert (+1) and every policy (-1) expectation as libsvm data.
		StringBuffer buffer = new StringBuffer();
		// +1
		buffer.append(getModelData(1, featureExpectationOptimal));
		// -1
		for(int i=0; i<featureExpectationList.size(); i++) {
			buffer.append(getModelData(-1, featureExpectationList.get(i)));
		}
		FileUtil.writeToFile(buffer.toString(), "/Users/pleasebugme/Desktop/model");
	}
	
	/**
	 * libsvm variant of the apprenticeship-learning loop: each iteration writes
	 * the accumulated feature expectations to a libsvm model file (+1 = expert,
	 * -1 = policies) and obtains the next projection base point from the
	 * trained SVM (Main.getFeatureExpectation).
	 * @throws IOException if the libsvm model files cannot be read or written
	 */
	public void computeByLibSVM() throws IOException {
		
		// Iteration history.
		List<Weight> weightList = new ArrayList<Weight>();
		List<T> tList = new ArrayList<T>();
		List<FeatureExpectation> featureExpectationList = new ArrayList<FeatureExpectation>();
		String modelFileName = "/Users/pleasebugme/Documents/workspace/MachineLearning/model";
		
		// t(0), t(1) placeholders so list indexes line up with iteration numbers.
		tList.add(new T(0));
		tList.add(new T(0));
		
		// Expert feature expectation u(E).
		FeatureExpectation featureExpectationOptimal = this.computeOptimalFeatureExpectation();
		
		// Seed with a random policy.
		Policy randomPolicy = new Policy(this.dataMatrix.getArray());
		policyList.add(randomPolicy);
		
		// Initialize u(0).
		FeatureExpectation featureExpectation0 = computeFeatureExpectation(randomPolicy);
		featureExpectationList.add(featureExpectation0);
		
		// Initialize w(0), w(1).
		weightList.add(new Weight());
		weightList.add(new Weight(featureExpectationOptimal.getValue().minus(featureExpectation0.getValue())));
		//System.out.println("w(1)="+JsonUtil.toJsonString(weightList.get(1).getWeightMatrix().getArray()));
		
		// Compute policy(1).
		policyList.add(computePolicyByRLAlgorithm(weightList.get(1)));
		
		// Initialize u(1).
		featureExpectationList.add(computeFeatureExpectation(policyList.get(1)));
		
		// Prepare the initial model data file.
		StringBuffer buffer = new StringBuffer();
		// u(E)
		buffer.append(getModelData(1, featureExpectationOptimal));
		// u(0) — NOTE(review): get(0) is u(0); the original comment claimed u(1).
		buffer.append(getModelData(-1, featureExpectationList.get(0)));
		FileUtil.writeToFile(buffer.toString(), modelFileName+1);
		
		// Loop: let libsvm pick the projection base from the model file each round.
		for(int i=2; i<ITERATOR_COUNT; i++) {
			System.out.println("迭代开始："+i);
			
			// Base point: u(0) on the first round, otherwise the SVM result.
			FeatureExpectation feOld = null;
			if(i == 2) {
				feOld = featureExpectation0;
			} else {
				feOld = Main.getFeatureExpectation(modelFileName + (i-2));
			}
			Matrix u1Minus2bar = featureExpectationList.get(i-1).getValue().minus(feOld.getValue());
			Matrix uEMinus2bar = featureExpectationOptimal.getValue().minus(feOld.getValue());
			double param = u1Minus2bar.transpose().times(uEMinus2bar).get(0, 0) / (u1Minus2bar.transpose().times(u1Minus2bar)).get(0, 0);
			Matrix featureExpectationsBarIMinus1 = feOld.getValue().plus(u1Minus2bar.times(param));
			
			// w(i)
			Matrix wi = featureExpectationOptimal.getValue().minus(featureExpectationsBarIMinus1);
			Weight weight = new Weight(wi);
			weightList.add(weight);
			System.out.println("weight："+JsonUtil.toJsonString(weight.getWeightMatrix().getArray()));
			
			// Compute policy(i).
			Policy policyi = computePolicyByRLAlgorithm(weightList.get(i));
			policyList.add(i, policyi);
			
			// compute featureExpectations(i)
			FeatureExpectation fe = computeFeatureExpectation(policyi);
			featureExpectationList.add(fe);
			
			// t(i): margin w(i)·(u(E) - u(i)).
			double ti = weight.getWeightMatrix().transpose().times(featureExpectationOptimal.getValue().minus(fe.getValue())).get(0, 0);
			tList.add(i, new T((float) ti));
			
			// if t(i) <= e break
			if(Math.abs(ti) <= e) {
				break;
			}
			
			// Append u(i) and rewrite the model file for the next round.
			// NOTE(review): buffer keeps accumulating, so file i contains every
			// expectation produced so far — confirm this growth is intended.
			buffer.append(getModelData(-1, featureExpectationList.get(featureExpectationList.size() - 1)));
			FileUtil.writeToFile(buffer.toString(), modelFileName+i);
		}
		
		System.out.println("最终weight："+JsonUtil.toJsonString(weightList.get(weightList.size()-1).getWeightMatrix().getArray()));
		
		System.out.println("u(E)="+JsonUtil.toJsonString(featureExpectationOptimal.getValue().getArray()));
		System.out.println("u()="+JsonUtil.toJsonString(computeFeatureExpectation(policyList.get(policyList.size()-1)).getValue().getArray()));
	}
	
	/**
	 * Formats a double with exactly four decimal places for the libsvm model
	 * file (e.g. "-.0439", "1.5000").
	 * BUGFIX: the original used the JVM's default locale, so on comma-decimal
	 * locales (e.g. de_DE) it would emit "0,0439" and corrupt the model file;
	 * ROOT-locale symbols pin the separator to '.'.
	 * @param d value to format
	 * @return fixed-point, dot-separated representation
	 */
	private String getNumberFormat(double d) {
		DecimalFormat df = new DecimalFormat("##.0000", DecimalFormatSymbols.getInstance(Locale.ROOT));
		return df.format(d);
	}
	
	/**
	 * Renders one feature-expectation vector as a libsvm training line:
	 * "+1 1:v1 2:v2 ... 15:v15 " followed by a newline.
	 * @param label +1 for the expert expectation, -1 for policy expectations
	 * @param fe feature expectation to serialize
	 * @return one libsvm-formatted line
	 */
	private String getModelData(int label, FeatureExpectation fe) {
		StringBuffer line = new StringBuffer();
		line.append(label == 1 ? "+1" : "-1").append(" ");
		double[][] values = fe.getValue().getArray();
		for (int index = 0; index < values.length; index++) {
			line.append(index + 1).append(":").append(getNumberFormat(values[index][0])).append(" ");
		}
		line.append("\n");
		return line.toString();
	}
	
	/**
	 * Value iteration for the reward implied by the weight vector:
	 * v(s) &lt;- R(s) + max_a sum_s' P(s'|s,a) v(s'), swept until the values
	 * converge (isConverge) or ITERATOR_COUNT sweeps have run, then the greedy
	 * policy is extracted from the final values.
	 * NOTE(review): getReward clamps rewards into [-1, 1], which makes the
	 * "reward &gt; 1 || reward &lt; -1" branch below unreachable — confirm
	 * which behavior (clamping vs. treating large rewards as terminal) was
	 * actually intended.
	 * @param weight weight vector, 15x1
	 * @return greedy policy for the converged values
	 */
	private Policy computePolicyByRLAlgorithm(Weight weight) {
		double[] value = new double[Policy.STATE_COUNT];
		double[] newValue = new double[Policy.STATE_COUNT];
		
		int iteration = 0;
		
		while(true) {
			iteration++;
			if(iteration > ITERATOR_COUNT) {
				break;
			}
			// One sweep: recompute v(s) for every state until v(s) converges.
			for(int i=0; i<Policy.STATE_COUNT; i++) {
				State state = getStateByStateIndex(i+1000);
				Feature feature = getFeatureByState(state);
				
				// Seed below any reachable expected value; sums smaller than -1
				// would be ignored by this seed — presumably values stay above it.
				float maxSumQStar = -1;
				int[] possibleAction = getPossibleActionByLaneOn(state.getLaneOn());
				// Over all possible actions, find the best expected successor value.
				int action = 0; // NOTE(review): written but never read — dead variable.
				for(int j=0; j<possibleAction.length; j++) {
					action = possibleAction[j];
					
					PossibilityStateDistributionOverAction possibilityStateDistribution = getPossibilityStateDistributionByCurrentStateAndAction(state, action);
					List<Integer> stateIndex = possibilityStateDistribution.getStateIndex();
					List<Float> possibility = possibilityStateDistribution.getPossibility();
					
					// Expected successor value under this action.
					float sumQStar = 0;
					for(int k=0; k<stateIndex.size(); k++) {
						//System.out.println(JsonUtil.toJsonString(getStateByStateIndex(stateIndex[k])));
						sumQStar += (value[stateIndex.get(k) - 1000] * possibility.get(k)) ;
					}
					if(sumQStar >= maxSumQStar) {
						maxSumQStar = sumQStar;
						// record the best action for the new policy (disabled)
						//policyArray[i][0] = action;
					}
				}
				//System.out.println("reward:"+(i+1000)+" "+getReward(weight, feature));
				double reward = getReward(weight, feature);
				if(reward > 1 || reward < -1) {
					// Unreachable while getReward clamps; see Javadoc NOTE.
					//System.out.println("reward="+reward);
					newValue[i] = reward;
				} else {
					newValue[i] = reward + maxSumQStar;
				}
			}
			
			//System.out.ln(JsonUtil.toJsonString(newValue));
			// Check whether the sweep changed anything beyond tolerance.
			if(this.isConverge(value, newValue)) {
//				System.out.println("结果已经收敛，迭代次数：" + iteratorCount);
//				// 打印
//				for(int i=0; i<value.length; i++) {
//					if(i%4 == 0) {
//						System.out.println();
//					}
//					System.out.printf("%+.4f, ", value[i]);
//					
//				}
				//System.out.println("结果已经收敛，迭代次数：" + iteration);
				
				break;
			}else {
				//System.out.println("迭代次数："+iteration);
			}
			
			// Copy the new values over for the next sweep.
			for(int i=0; i<value.length; i++) {
				value[i] = newValue[i];
			}
		}
		
		//System.out.println(JsonUtil.toJsonString(newValue));
		/*
		System.out.println("-7 "+JsonUtil.toJsonString(newValue[3909]));
		System.out.println("-6 "+JsonUtil.toJsonString(newValue[3919]));
		System.out.println("-5 "+JsonUtil.toJsonString(newValue[3929]));
		System.out.println("-4 "+JsonUtil.toJsonString(newValue[3939]));
		System.out.println("-3 "+JsonUtil.toJsonString(newValue[3949]));
		System.out.println("-2 "+JsonUtil.toJsonString(newValue[3959]));
		System.out.println("-1 "+JsonUtil.toJsonString(newValue[3969]));
		System.out.println("0  "+JsonUtil.toJsonString(newValue[3979]));
		System.out.println("1  "+JsonUtil.toJsonString(newValue[3989]));
		System.out.println("2  "+JsonUtil.toJsonString(newValue[3999]));
		*/
		return getPolicyByValueArray(newValue);
	}
	
	/**
	 * Derives a greedy policy from a converged value array: for every state,
	 * pick the action leading towards the reachable successor state with the
	 * highest value.
	 * @param valueArray converged state values, indexed by stateIndex-1000
	 * @return policy matrix (STATE_COUNT x 1) of chosen actions
	 */
	private Policy getPolicyByValueArray(double[] valueArray) {
		double[][] policyArray = new double[Policy.STATE_COUNT][1];
		for(int i=0; i<Policy.STATE_COUNT; i++) {
			State state = getStateByStateIndex(i+1000);
			int[] possibleActions = getPossibleActionByLaneOn(state.getLaneOn());
			
			List<Integer> possibleNextStateIndexes = new ArrayList<Integer>();
			for(int j=0; j<possibleActions.length; j++) {
				// BUGFIX: state indexes are 1000-based; the original passed the raw
				// loop index i, so successors were computed for a different, invalid
				// state (laneOn 0..4) than the one whose actions were enumerated.
				possibleNextStateIndexes.addAll( getPossibleNextStateIndexes(i+1000, possibleActions[j]) );
			}
			int maxValueStateIndex = getMaxValueStateIndexByStateIndexes(possibleNextStateIndexes, valueArray);
			policyArray[i][0] = getActionFromStartStateIndexToEndStateIndex(i+1000, maxValueStateIndex);
		}
		return new Policy(new Matrix(policyArray));
	}
	
	/**
	 * Maps a successor state index back to the action that reaches it: an
	 * action encodes the target lane, and the thousands digit of a packed
	 * state index IS the lane, so the action is simply endStateIndex / 1000.
	 * @param startStateIndex originating state index (currently unused)
	 * @param endStateIndex successor state index (1000..5999)
	 * @return the action (lane number)
	 */
	private int getActionFromStartStateIndexToEndStateIndex(int startStateIndex, int endStateIndex) {
		return endStateIndex / 1000;
	}
	
	/**
	 * Lists the state indexes reachable from a state when taking an action.
	 * NOTE(review): expects a full 1000-based packed state index (the argument
	 * is decoded via getStateByStateIndex); callers must not pass a 0-based
	 * array offset.
	 * @param stateIndex packed state index (1000..5999)
	 * @param action lane to switch to
	 * @return indexes of the possible successor states
	 */
	private List<Integer> getPossibleNextStateIndexes(int stateIndex, int action) {
		State state = getStateByStateIndex(stateIndex);
		PossibilityStateDistributionOverAction psdoa = getPossibilityStateDistributionByCurrentStateAndAction(state, action);
		return psdoa.getStateIndex();
	}
	
	/**
	 * Returns the candidate state index whose value is highest.
	 * @param possibleNextStateIndexes candidate successor indexes (1000-based)
	 * @param valueArray state values indexed by stateIndex-1000
	 * @return the best candidate index, or 0 if the list is empty
	 */
	private int getMaxValueStateIndexByStateIndexes(List<Integer> possibleNextStateIndexes, double[] valueArray) {
		int bestStateIndex = 0;
		// Sentinel below the value range produced by value iteration
		// (reward >= -1 plus maxSumQStar seeded at -1, i.e. values >= -2).
		double bestValue = -2;
		for (int k = 0; k < possibleNextStateIndexes.size(); k++) {
			int candidate = possibleNextStateIndexes.get(k);
			double candidateValue = valueArray[candidate - 1000];
			if (candidateValue > bestValue) {
				bestValue = candidateValue;
				bestStateIndex = candidate;
			}
		}
		return bestStateIndex;
	}
	
	/**
	 * Computes the reward of a state as the dot product w·φ(s), saturated to
	 * the range [-1, 1].
	 * BUGFIX: the original clamped values greater than 1 to -1 (an asymmetric,
	 * almost certainly accidental assignment); a saturating clamp maps them
	 * to +1 instead.
	 * @param weight weight vector, 15x1
	 * @param feature feature vector, 15x1
	 * @return the clamped reward in [-1, 1]
	 */
	private double getReward(Weight weight, Feature feature) {
		double result = weight.getWeightMatrix().transpose().times(feature.getFeatureMatrix()).get(0, 0);
		if(result > 1) {
			result = 1;
		} else if(result < -1) {
			result = -1;
		}
		return result;
	}
	
	/**
	 * Checks whether value iteration has converged: every element of the two
	 * value arrays must differ by no more than {@link #RL_MAX_ERROR}.
	 * @param oldValue values from the previous sweep
	 * @param newValue values from the current sweep
	 * @return true if all element-wise differences are within RL_MAX_ERROR
	 */
	public boolean isConverge(double[] oldValue, double[] newValue) {
		for (int i = 0; i < oldValue.length; i++) {
			// Early exit: one out-of-tolerance element already decides the answer.
			// (The original kept scanning and tracked an unused maxError local.)
			if (Math.abs(oldValue[i] - newValue[i]) > RL_MAX_ERROR) {
				return false;
			}
		}
		return true;
	}
	
	/**
	 * Extracts a contiguous range of columns (all rows) from a matrix.
	 * @param matrix source matrix
	 * @param fromColumnIndex first column, inclusive (0-based)
	 * @param toColumnIndex last column, inclusive (0-based)
	 * @return the column sub-matrix
	 */
	private Matrix getMatrixColumn(Matrix matrix, int fromColumnIndex, int toColumnIndex) {
		return matrix.getMatrix(0, matrix.getRowDimension() - 1, fromColumnIndex, toColumnIndex);
	}
	
	/**
	 * Extracts a single row (all columns) from a matrix as a 1xN matrix.
	 * @param matrix source matrix
	 * @param rowIndex row to extract (0-based)
	 * @return the row sub-matrix
	 */
	private Matrix getMatrixRow(Matrix matrix, int rowIndex) {
		return matrix.getMatrix(rowIndex, rowIndex, 0, matrix.getColumnDimension()-1);
	}
	
	/**
	 * Decodes a packed state index into a State: the thousands digit is the
	 * lane; the remaining three digits are the lane distances offset by +7
	 * (inverse of getStateIndexByState).
	 * @param stateIndex packed index, 1000 ~ 5999
	 * @return the decoded state
	 */
	public State getStateByStateIndex(int stateIndex) {
		int lane = stateIndex / 1000;
		int d2 = stateIndex / 100 % 10 - 7;
		int d3 = stateIndex / 10 % 10 - 7;
		int d4 = stateIndex % 10 - 7;
		return new State(lane, d2, d3, d4);
	}
	
	/**
	 * Encodes a state into its packed index (1000 ~ 5999): the thousands digit
	 * is the lane, and each distance is shifted by +7 into a decimal digit.
	 * @param state state to encode
	 * @return the packed state index
	 */
	public static int getStateIndexByState(State state) {
		int index = state.getLaneOn() * 1000;
		index += (state.getDistanceOf2() + 7) * 100;
		index += (state.getDistanceOf3() + 7) * 10;
		index += state.getDistanceOf4() + 7;
		return index;
	}
	
	/**
	 * Builds the 15x1 binary feature vector of a state.  Components 0..4
	 * one-hot encode the occupied lane; components 5..14 one-hot encode the
	 * closest-car distance on that lane (offset by +12).  Lanes 1 and 5 carry
	 * no distance reading, so their distance defaults to -7.
	 * @param state state to encode
	 * @return the feature vector
	 */
	public Feature getFeatureByState(State state) {
		Matrix featureMatrix = new Matrix(FEATURE_LENGTH, 1);
		double[][] features = featureMatrix.getArray();
		
		int laneOn = state.getLaneOn();
		int closestDistance;
		switch (laneOn) {
		case 2:
			closestDistance = state.getDistanceOf2();
			break;
		case 3:
			closestDistance = state.getDistanceOf3();
			break;
		case 4:
			closestDistance = state.getDistanceOf4();
			break;
		default:
			closestDistance = -7;
			break;
		}
		features[laneOn - 1][0] = 1;
		features[12 + closestDistance][0] = 1;
		
		return new Feature(featureMatrix);
	}
	
	/**
	 * Lists the actions available from a lane (the lanes the car may switch
	 * to, including staying put — at most one lane to either side).
	 * @param laneOn lane currently occupied
	 * @return reachable lane numbers, in ascending order
	 */
	public int[] getPossibleActionByLaneOn(int laneOn) {
		switch (laneOn) {
		case 1:
			return new int[]{1, 2};
		case 2:
			return new int[]{1, 2, 3};
		case 3:
			return new int[]{2, 3, 4};
		case 4:
			return new int[]{3, 4, 5};
		default:
			return new int[]{4, 5};
		}
	}
	
	/**
	 * Builds the successor-state probability distribution for taking an action
	 * (lane changes are assumed instantaneous, so the action directly sets the
	 * lane of every successor state).  Each of lanes 2-4 independently has its
	 * closest-car distance advance (per getNextDistance) with a fixed per-lane
	 * probability; all eight change/no-change combinations are enumerated, and
	 * a changing lane's probability mass is split uniformly over its possible
	 * next distances.
	 * @param state current state
	 * @param action lane to switch to
	 * @return successor state indexes paired with their probabilities
	 */
	public PossibilityStateDistributionOverAction getPossibilityStateDistributionByCurrentStateAndAction(State state, int action) {
		
		List<Float> possiblity = new ArrayList<Float>();
		List<Integer> stateIndex = new ArrayList<Integer>();
		// TODO
		// Per-lane change probabilities derived from car length and relative speed
		// — NOTE(review): the exact model behind this formula is undocumented.
		float lane2ChangePossibility = 1.0f / (float)Car.CAR_LENGTH / (Car.SPEED_INIT - Car.SPEED_FAST);
		float lane3ChangePossibility = 1.0f / (float)Car.CAR_LENGTH / (Car.SPEED_INIT - Car.SPEED_MEDIUM);
		float lane4ChangePossibility = 1.0f / (float)Car.CAR_LENGTH / (Car.SPEED_INIT - Car.SPEED_SLOW);
		
		int distanceOf2 = state.getDistanceOf2();
		int distanceOf3 = state.getDistanceOf3();
		int distanceOf4 = state.getDistanceOf4();
		
		int[] nextDistancesOf2 = getNextDistance(distanceOf2); 
		int[] nextDistancesOf3 = getNextDistance(distanceOf3); 
		int[] nextDistancesOf4 = getNextDistance(distanceOf4); 
		
		// No lane changes, or only lane 4 changes.
		int nextStateNoChanged = getStateIndexByState(new State(action, state.getDistanceOf2(), state.getDistanceOf3(), state.getDistanceOf4()));
		stateIndex.add( nextStateNoChanged );
		possiblity.add( (1 - lane2ChangePossibility) * (1 - lane3ChangePossibility) * (1 -lane4ChangePossibility) );
		for(int i=0; i<nextDistancesOf4.length; i++) {
			stateIndex.add(getStateIndexByState(new State(action, state.getDistanceOf2(), state.getDistanceOf3(), nextDistancesOf4[i])));
			possiblity.add( (1 - lane2ChangePossibility) * (1 - lane3ChangePossibility) * lane4ChangePossibility / nextDistancesOf4.length );
		}
		
		// Lane 3 changes (alone, or together with lane 4).
		for(int i=0; i<nextDistancesOf3.length; i++) {
			stateIndex.add(getStateIndexByState(new State(action, state.getDistanceOf2(), nextDistancesOf3[i], state.getDistanceOf4())));
			possiblity.add( (1 - lane2ChangePossibility) * lane3ChangePossibility * (1 - lane4ChangePossibility) / nextDistancesOf3.length );
			for(int j=0; j<nextDistancesOf4.length; j++) {
				stateIndex.add(getStateIndexByState(new State(action, state.getDistanceOf2(), nextDistancesOf3[i], nextDistancesOf4[j])));
				possiblity.add( (1 - lane2ChangePossibility) * lane3ChangePossibility * lane4ChangePossibility / nextDistancesOf3.length / nextDistancesOf4.length);
			}
		}
		
		// Lane 2 changes (alone, or together with lane 4).
		for(int i=0; i<nextDistancesOf2.length; i++) {
			stateIndex.add(getStateIndexByState(new State(action, nextDistancesOf2[i], state.getDistanceOf3(), state.getDistanceOf4())));
			possiblity.add( lane2ChangePossibility * (1 - lane3ChangePossibility) * (1 - lane4ChangePossibility) / nextDistancesOf2.length );
			for(int j=0; j<nextDistancesOf4.length; j++) {
				stateIndex.add( getStateIndexByState(new State(action, nextDistancesOf2[i],  state.getDistanceOf3(), nextDistancesOf4[j])) );
				possiblity.add( lane2ChangePossibility * (1 - lane3ChangePossibility) * lane4ChangePossibility / nextDistancesOf2.length / nextDistancesOf4.length);
			}
		}
		
		// Lanes 2 and 3 change (with or without lane 4).
		for(int i=0; i<nextDistancesOf2.length; i++) {
			for(int j=0; j<nextDistancesOf3.length; j++) {
				stateIndex.add( getStateIndexByState(new State(action, nextDistancesOf2[i], nextDistancesOf3[j], state.getDistanceOf4())) );
				possiblity.add( lane2ChangePossibility * lane3ChangePossibility * (1 - lane4ChangePossibility) / nextDistancesOf2.length / nextDistancesOf3.length);
				for(int k=0; k<nextDistancesOf4.length; k++) {
					stateIndex.add( getStateIndexByState(new State(action, nextDistancesOf2[i], nextDistancesOf3[j], nextDistancesOf4[k])) );
					possiblity.add( lane2ChangePossibility * lane3ChangePossibility * lane4ChangePossibility / nextDistancesOf2.length / nextDistancesOf3.length / nextDistancesOf4.length);
				}
			}
		}
		
		return new PossibilityStateDistributionOverAction(possiblity, stateIndex);
	}
	
	/**
	 * Enumerates the closest-car distances possible at the next time step,
	 * given the current closest distance on a lane.  Negative distances close
	 * in by one unit per step; non-negative ones branch into the modeled
	 * alternatives (presumably the car passing or a new car appearing behind
	 * — TODO confirm the domain model).
	 * @param distance current closest distance on the lane
	 * @return possible distances at the next step
	 */
	private int[] getNextDistance(int distance) {
		if (distance < 0) {
			return new int[]{distance + 1};
		}
		if (distance == 0) {
			return new int[]{-1, 1};
		}
		if (distance == 1) {
			return new int[]{-2, 2};
		}
		return new int[]{-2, -3, -4, -5, -6, -7};
	}
	
	/**
	 * Ad-hoc entry point: runs the default constructor (which reads the
	 * hard-coded trajectory file and trains via libsvm), then recomputes a
	 * policy from a fixed weight vector for inspection.
	 */
	public static void main(String[] args) {
		/*
		double[][] d = new double[][]{{3}, {-7}, {-6}, {-5}};
		Matrix m = new Matrix(d);
		System.out.println(InverseReinfocementLearning.getStateIndexByStateMatrix(m));
		*/
		double[][] d = new double[][]{{-0.0439}, {0.0078}, {0.0078}, {0.0318}, {-0.0035}, {0.032}, {0.024}, {0.016}, {0.008}, {0.0001}, {-0.007}, {-0.014}, {-0.0767}, {0.012}, {0.034}};
		InverseReinfocementLearning irl = new InverseReinfocementLearning();
		irl.computePolicyByRLAlgorithm(new Weight(new Matrix(d)));
		//FeatureExpectation fe = irl.computeOptimalFeatureExpectation();
		//System.out.println(JsonUtil.toJsonString(fe.getValue().getArray()));
		//System.out.println(JsonUtil.toJsonString(irl.computeFeatureExpectation(new Policy(3)).getValue().getArray()));
	}
}
