/**
 * Q-learning implementation of the temporal-difference update.
 *
 * Applies the standard TD(0) rule
 *   Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
 * to either a Racer's own Q-table or a shared PoliceStation Q-table.
 *
 * NOTE(review): alpha, gamma, epsilon and reward are assumed to be fields
 * declared on TemporalDifference (not visible here) — confirm in superclass.
 */
public class QLearning extends TemporalDifference {

    /** Number of discrete actions per state (matches the Q-table's 3rd dimension). */
    private static final int ACTION_COUNT = 4;

    /** Width/height of the grid covered by the exploration table. */
    private static final int GRID_SIZE = 12;

    /** Visit count beyond which a state's Q-values are wiped to force re-exploration. */
    private static final int EXPLORATION_LIMIT = 500;

    /**
     * Reward value treated as a terminal/goal transition.
     * NOTE(review): inferred from the original `reward != 10` guard — confirm
     * this matches the reward the environment emits on reaching the goal.
     */
    private static final double TERMINAL_REWARD = 10;

    /**
     * Creates a Q-learning updater.
     *
     * @param alpha   learning rate (step size of each update)
     * @param gamma   discount factor for the bootstrapped next-state value
     * @param epsilon exploration rate (stored but not read in this class;
     *                presumably used by the action-selection policy)
     * @param reward  reward signal applied on each learning step
     */
    public QLearning(double alpha, double gamma, double epsilon, double reward) {
        this.alpha = alpha;
        this.gamma = gamma;
        this.epsilon = epsilon;
        this.reward = reward;
    }

    /**
     * Applies one Q-learning update to the Racer's own Q-table.
     *
     * Non-terminal step: bootstraps on the best Q-value of the current state
     * and bumps the visit counter of the old (state, action) pair. If that
     * counter exceeds EXPLORATION_LIMIT, the state's Q-values and counters
     * are all reset so the state gets re-explored.
     *
     * Terminal step (reward == TERMINAL_REWARD): applies the reward without
     * a bootstrapped next-state term and clears every visit counter for the
     * next episode.
     *
     * @param component the learner; must be a Racer (unchecked cast — a
     *                  ClassCastException is a programming error here)
     */
    public void Learn(Component component) {
        Racer r = (Racer) component;
        if (reward != TERMINAL_REWARD) {
            // TD(0) update toward reward + discounted best next-state value.
            r.qValues[r.oldState.x][r.oldState.y][r.oldState.action] +=
                    alpha * (reward + gamma * max(r.qValues[r.currentState.x][r.currentState.y])
                            - r.qValues[r.oldState.x][r.oldState.y][r.oldState.action]);

            r.exploration[r.oldState.x][r.oldState.y][r.oldState.action]++;
            if (r.exploration[r.oldState.x][r.oldState.y][r.oldState.action] > EXPLORATION_LIMIT) {
                // Over-visited state: wipe its Q-values and visit counters so
                // the agent re-explores it instead of looping forever.
                for (int i = 0; i < ACTION_COUNT; i++) {
                    r.qValues[r.oldState.x][r.oldState.y][i] = 0;
                    // BUG FIX: original reset index r.oldState.action on every
                    // iteration, leaving the other three counters untouched.
                    r.exploration[r.oldState.x][r.oldState.y][i] = 0;
                }
            }
        } else {
            // Terminal transition: no next state to bootstrap from.
            r.qValues[r.oldState.x][r.oldState.y][r.oldState.action] += alpha * reward;
            // Episode finished — clear all visit counters for the next run.
            for (int k = 0; k < GRID_SIZE; k++) {
                for (int m = 0; m < GRID_SIZE; m++) {
                    for (int i = 0; i < ACTION_COUNT; i++) {
                        r.exploration[k][m][i] = 0;
                    }
                }
            }
        }
    }

    /**
     * Applies one Q-learning update to the shared PoliceStation Q-table,
     * indexed by the Police agent's old/current states. Same TD(0) rule as
     * {@link #Learn(Component)} but with no exploration bookkeeping.
     *
     * @param c1 the acting agent; must be a Police (unchecked cast)
     * @param c2 the holder of the shared Q-table; must be a PoliceStation
     *           (unchecked cast)
     */
    public void Learn(Component c1, Component c2) {
        Police p = (Police) c1;
        PoliceStation polStation = (PoliceStation) c2;
        if (reward != TERMINAL_REWARD) {
            polStation.qValues[p.oldState.x][p.oldState.y][p.oldState.action] +=
                    alpha * (reward + gamma * max(polStation.qValues[p.currentState.x][p.currentState.y])
                            - polStation.qValues[p.oldState.x][p.oldState.y][p.oldState.action]);
        } else {
            // Terminal transition: apply the reward with no bootstrap term.
            polStation.qValues[p.oldState.x][p.oldState.y][p.oldState.action] += alpha * reward;
        }
    }

    /**
     * Returns the largest value in the given action-value array.
     *
     * @param actions per-action Q-values; must be non-empty (index 0 is read
     *                unconditionally, as in the original)
     * @return the maximum element of {@code actions}
     */
    public double max(double actions[]) {
        double maxAction = actions[0];
        for (int i = 1; i < actions.length; i++) {
            if (maxAction < actions[i]) {
                maxAction = actions[i];
            }
        }
        return maxAction;
    }
}