#include <math.h>

#include "darts.h"
 
/* Components of a darts player. */

location pi_star[NUM_STATES];    /* pi_star[s] = target to aim at when the remaining score is s; written by infinite_value_iteration */
float V[NUM_STATES];    /* V[s] = value of state s under the stored strategy; written by infinite_value_iteration */


/* Get the intial target. */
/* Begin a game: solve the MDP once for the given discount factor,
 * then return the optimal opening target. */
location start_game(float gamma) {

  /* Populates pi_star[] and V[] with the optimal policy and values. */
  infinite_value_iteration(gamma);

  return pi_star[START_STATE];
}

/* Get the next target */
/* Return the precomputed optimal target for the current score s. */
location get_target(int s) {
  return pi_star[s];
}

/* <CODE HERE>: Implement the transition function.
 * Given the previous state, next state, and action (previous score,
 * next_score, target), returns the transition probability in the MDP
 */
/* Transition function of the darts MDP.
 * Given the previous score s, the next score s_prime, and the action a
 * (the location aimed at), return the probability of moving from score
 * s to s_prime.  The dart lands within two rings and two wedges of the
 * aim point; each offset is weighted by p_array.
 */
float T(int s, int s_prime, location a) {
    int t_points = s - s_prime; /* points the throw must score for this transition */
    float P = 0.0;              /* accumulated probability of scoring t_points */
    /* Probability of landing -2..+2 rings/wedges off target. */
    float p_array[] = {0.1, 0.2, 0.4, 0.2, 0.1};

    /* A single throw cannot score negative points or more than a triple
     * of the highest wedge. */
    if (t_points > 3*NUM_WEDGES || t_points < 0)
      return 0;

    for (int ir = 0; ir < 5; ir++) {
      for (int iw = 0; iw < 5; iw++) {
        /* Add NUM_WEDGES before taking the modulus: in C, `%` of a
         * negative value is negative, so angles[a.wedge]-2+iw < 0
         * would index wedges[] out of bounds. */
        int wedgeN = wedges[(angles[a.wedge] - 2 + iw + NUM_WEDGES) % NUM_WEDGES];
        /* Clamp the ring offset into the valid ring range; a negative
         * ring reflects back onto the board. */
        int ringN = a.ring - 2 + ir;
        if (ringN > 6)
          ringN = 6;
        if (ringN < 0)
          ringN *= -1;

        /* Use a stack-allocated location; the original malloc'd one per
         * inner iteration and never freed it (25 leaks per call). */
        location loc;
        loc.ring = ringN;
        loc.wedge = wedgeN;
        int points = location_to_score(loc);

        if (points == t_points) {
          P += p_array[ir] * p_array[iw];
        }
      }
    }
    return P;
}

/* Value iteration algorithm for infinite horizon
 * Given gamma (discounted value), store optimal strategy in pi
 * and value for each state in v_star.
 */
/* Value iteration for the infinite-horizon darts MDP.
 * Given gamma (discount factor), store the optimal strategy in pi_star
 * and the value of each state in V.  Iterates Bellman backups until no
 * state value changes by more than EPSILON.
 */
void infinite_value_iteration(float gamma) {
    float V_prime[NUM_STATES];  /* per-state values before the current sweep */
    int s, s_prime, a;
    int converging = 0, num_iterations = 0;
    location actions[NUM_ACTIONS];

    /* Enumerate every aimable target once. */
    get_actions(actions);

    /* Start from the all-zero value function. */
    for (s = 0; s < NUM_STATES; s++)
        V[s] = 0;

    /* Sweep until every V[s] has converged. */
    while (!converging) {
        num_iterations++;
        for (s = 0; s < NUM_STATES; s++) {

            /* Snapshot V[s] before overwriting it, so convergence can
             * be checked against the pre-sweep value. */
            V_prime[s] = V[s];

            for (a = 0; a < NUM_ACTIONS; a++) {

                float Q_a;
                location action;
                action = actions[a];

                /* Q(s,a) = R(s,a) + gamma * sum_s' T(s,s',a) * V(s') */
                Q_a = R(s, actions[a]);

                for (s_prime = 0; s_prime < NUM_STATES; s_prime++)
                    Q_a += gamma * T(s, s_prime, action) * V_prime[s_prime];

                /* Keep the action that maximizes Q and its value. */
                if (a == 0 || Q_a > V[s]) {
                    pi_star[s] = action;
                    V[s] = Q_a;
                }
            }
        }

        converging = 1;
        for (s = 0; s < NUM_STATES; s++) {
            /* One state still moving by more than EPSILON blocks
             * convergence.  NOTE: the original used integer abs(),
             * which truncates the float difference toward zero, so any
             * change smaller than 1.0 looked converged and the loop
             * stopped far too early; fabsf() keeps full precision. */
            if (EPSILON < fabsf(V_prime[s] - V[s])) {
                converging = 0;
            }
        }
    }

    /* printf("# of iterations: %d\n", num_iterations); */
}
