#include "darts.h"

/* 
 * The default player aims for the maximum score, unless the
 * current score is less than the number of wedges, in which
 * case it aims for the exact score it needs. 
 * 
 * <CODE HERE>: You may use the following functions as a basis for 
 * implementing the Q learning algorithm or define your own 
 * functions.
 * ----------------------------------------
 * Note: Our implementation is in Q_learning() -- the two 
 * functions below are unchanged
 */

location start_game(float gamma) {
  location result;
  
  result.wedge = NUM_WEDGES;
  result.ring = MIDDLE_RING;

  return result;
}

/*
 * get_target -- choose where to aim given the remaining score.
 * When the score can be finished with a single-wedge hit
 * (score <= NUM_WEDGES), aim exactly at that wedge; otherwise
 * keep aiming for the maximum-value region.
 */
location get_target(int score) {
  location target;

  if (score <= NUM_WEDGES) {
    target.wedge = score;
    target.ring = SECOND_PATCH;
  } else {
    target.wedge = NUM_WEDGES;
    target.ring = MIDDLE_RING;
  }

  return target;
}
 
/* 
 * Exploration/ exploitation strategy one.
 * --------------------------------------------------
 * Simple strategy with P(explore) = exp(-t/1000) */
int ex_strategy_one(int t) {
	float epsilon = exp(-(float)t/1000);  // Probability of exploration
  float r = (float) rand()/RAND_MAX;
  return (r < epsilon)?1:0;
}

/*
 * Exploration/exploitation strategy two.
 * --------------------------------------------------
 * Explore while the least-sampled action in the current state is
 * still poorly explored: P(explore) = exp(-t*min_num_actions/10).
 *
 * t                    -- global iteration (throw) count
 * num_actions_in_state -- per-action sample counts for the current
 *                         state (length NUM_ACTIONS)
 *
 * Returns 1 to explore, 0 to exploit.
 *
 * Fixes vs. previous version:
 *  - the minimum was initialized to 0, so it could never rise to the
 *    true minimum of the (non-negative) counts;
 *  - a stray ';' after the if() made the assignment unconditional, so
 *    the "minimum" tracked the LAST element instead of the smallest.
 */
int ex_strategy_two(int t, float *num_actions_in_state) {
  // Find minimum number of actions sampled in the current state
  float min_num_actions = num_actions_in_state[0];
  for (int a = 1; a < NUM_ACTIONS; a++) {
    if (num_actions_in_state[a] < min_num_actions)
      min_num_actions = num_actions_in_state[a];
  }

  // Incorporate minimum number of actions into our decay function.
  // The product is formed in float so a large t cannot overflow int.
  float epsilon = exp(-((float)t * min_num_actions) / 10.0f);
  float r = (float)rand() / RAND_MAX;
  return (r < epsilon) ? 1 : 0;

  /* Note that there is additional code in Q_learning() in order to prioritize 
   * exploring the least-explored actions */
}

/* 
 * The Q-learning algorithm.
 *
 * gamma     -- discount factor for future reward
 * num_games -- number of complete games to train over
 *
 * Plays num_games games; each throw is chosen either by exploring
 * (ex_strategy_*) or by exploiting the current Q table, then Q[s][a]
 * is updated with the temporal-difference rule.  Prints the average
 * number of turns per game when done.
 *
 * Fix vs. previous version: the running maximum of Q[s'][a'] was held
 * in an int initialized to 0, which truncated the float Q values and
 * could never pick up a maximum below zero.  It is now a float seeded
 * from Q[s'][0].
 */
void Q_learning(float gamma, int num_games) {
  location actions[NUM_ACTIONS];
  float Q[NUM_STATES][NUM_ACTIONS];
  float num_actions[NUM_STATES][NUM_ACTIONS];  /* sample counts per (s,a) */
  int num_iterations = 0;
  int s, s_prime;
  float alpha;
  
  /* store all actions (targets on dartboard) in actions array */
  get_actions(actions);
  
  /* Initialize arrays: Q(s,a) to R(s,a), counts array num_actions to 0.
   * (Loop index renamed from s to is so it no longer shadows the outer s.) */
  for (int is = 0; is < NUM_STATES; is++) {
    for (int a = 0; a < NUM_ACTIONS; a++) {
      Q[is][a] = R(is, actions[a]);
      num_actions[is][a] = 0;
    }
  }

  /* play num_games games */
  for (int g = 1; g <= num_games; g++) {

    /* run a single game: keep throwing until the score reaches 0 */
    for (s = START_SCORE; s > 0;) {
  
      int target_index;
      int to_explore;
      location loc;
      
      num_iterations++;
    	  
      /* The following two statements implement two exploration-exploitation strategies. */
    	//to_explore = ex_strategy_one(num_iterations);
    	to_explore = ex_strategy_two(num_iterations, num_actions[s]);
    		
      if(to_explore) {
        /* explore */
        // Code for ex_strategy_one: randomly pick action
        //target_index = rand() % NUM_ACTIONS;
        
        // Code for ex_strategy_two: pick the least-explored action
        int min_a = 0;
        for (int a = 0; a < NUM_ACTIONS; a++) {
          if (num_actions[s][a] < num_actions[s][min_a])
            min_a = a;
        }
        target_index = min_a;
      } else {
        /* exploit the action that gives max Q (random starting index
         * breaks ties between equal Q values) */
        int maxQa = rand() % NUM_ACTIONS;
        for (int a = 0; a < NUM_ACTIONS; a++)
          if (Q[s][a] > Q[s][maxQa]) 
            maxQa = a;
        target_index = maxQa;
      }
      
      /* Get result of throw from dart thrower; update score and counts.
       * A throw that would overshoot the remaining score ("bust")
       * leaves the score unchanged. */
      loc = throw(actions[target_index]); 
      s_prime = s - location_to_score(loc);
      if (s_prime < 0)
        s_prime = s;
      num_actions[s][target_index]++;
      
      /* Learning rate alpha = 1 / (total samples of this action across
       * all states).  The count was just incremented, so the sum >= 1
       * and the division is safe. */
      int samples_at_a = 0;
      for (int is = 0; is < NUM_STATES; is++)
        samples_at_a += num_actions[is][target_index];
      alpha = (float)1 / samples_at_a;
      
      /* Find max_{a'}{Q[s',a']} -- FIX: accumulate in a float seeded
       * from Q[s'][0] instead of an int initialized to 0. */
      float maxQ_sp_ap = Q[s_prime][0];
      for (int a = 1; a < NUM_ACTIONS; a++)
        if (Q[s_prime][a] > maxQ_sp_ap)
          maxQ_sp_ap = Q[s_prime][a];
      
      /* Q update */
      // Note that we omitted the reward function because it will always be 0 when s!=0.
      Q[s][target_index] = Q[s][target_index] + alpha * (gamma*maxQ_sp_ap - Q[s][target_index]);
      
      /* Next state becomes current state */
      s = s_prime;
    }
  }  
	printf("Num games = %d, Average turns = %f\n", num_games, (float)num_iterations/num_games);
  
  /* Uncomment to dump the learned Q table:
  for (s = 0; s < NUM_STATES; s++) {
    for (int a = 0; a < NUM_ACTIONS; a++)
      printf("%f ", Q[s][a]);
    printf("\n");
  }*/
}

