#include "darts.h"

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* The convergence threshold for the model-based infinite-horizon value iteration algorithm */
#define EPSILON_VI 0.001


/* 
 * NOTE: We did not implement the interface methods start_game and get_target.
 * Functions below are simply the default darts player discussed in modelfree.c.
 * Instead, we defined the function modelbased, which uses the helper methods
 * ex_strategy_one and ex_strategy_two. 
 */

location start_game(float gamma) {
  location result;
  
  result.wedge = NUM_WEDGES;
  result.ring = MIDDLE_RING;

  return result;
}

/* Interface method: target given the current remaining score.
 * Scores above NUM_WEDGES cannot be finished with a single outer-patch
 * throw, so keep aiming at the triple of the highest wedge; otherwise
 * aim for the wedge equal to the score, on the second patch. */
location get_target(int score) {
  int too_high = (score > NUM_WEDGES);
  location target;

  target.wedge = too_high ? NUM_WEDGES : score;
  target.ring  = too_high ? MIDDLE_RING : SECOND_PATCH;

  return target;
}


/* <CODE HERE>: Define your first exploration/exploitation strategy here. Return 0 to exploit and 1 to explore. 
 You may want to pass arguments from the modelbased function.
 --------------------------------------------------
 Simple strategy with P(explore) = exp(-t/1000) */
int ex_strategy_one(int t) {
  float epsilon = exp(-(float)t/1000);  // Probability of exploration
  float r = (float) rand()/RAND_MAX;
  return (r < epsilon)?1:0;
}

/* Second exploration/exploitation strategy: explore more eagerly while the
 * least-sampled action in the current state still has few samples.
 * Returns 1 to explore, 0 to exploit.
 *
 * Fixes two bugs in the minimum scan:
 *  - a stray ';' after the if() made the assignment unconditional, so the
 *    "minimum" was really just the LAST element;
 *  - the minimum was seeded with 0, and action counts are non-negative, so
 *    it could never rise above 0 anyway.  Seed with the first element. */
int ex_strategy_two(int t, float *num_actions_in_state) {
  /* Find the minimum number of samples over all actions in this state. */
  float min_num_actions = num_actions_in_state[0];
  for (int a = 1; a < NUM_ACTIONS; a++) {
    if (num_actions_in_state[a] < min_num_actions)
      min_num_actions = num_actions_in_state[a];
  }

  /* Decay P(explore) with both elapsed throws and how well-sampled the
   * least-tried action already is. */
  float epsilon = exp(-((float)t * min_num_actions) / 10.0f);
  float r = (float)rand() / RAND_MAX;
  return (r < epsilon) ? 1 : 0;

  /* Note: modelbased() is written to prioritize the least-explored action
   * whenever this strategy chooses to explore. */
}

/* <CODE HERE>: You may need to change this code to pass 
 * additional arguments to the ex_strategy_() functions.
 * The model-based reinforcement learning algorithm. 
 * Given num_games (the number of games to play), store the
 * learned transition probabilities in T.
 */
void modelbased(float gamma, int epoch_size, float T_matrix[NUM_STATES][NUM_STATES][NUM_ACTIONS], int num_games) {
    /* All candidate targets on the dartboard, one per action index. */
    location actions[NUM_ACTIONS];
    /* Current greedy policy: pi_star[s] = action index to aim for in state s. */
    int pi_star[NUM_STATES];
    int s, a, s_prime, g, i, j, k;
    /* num_actions[s][a]: times action a was taken in state s (float so the
     * ratio below is computed in floating point). */
    float num_actions[NUM_STATES][NUM_ACTIONS];
    /* num_transitions[s][s'][a]: times taking a in state s led to state s'. */
    float num_transitions[NUM_STATES][NUM_STATES][NUM_ACTIONS];
    /* Total throws across all games; drives epoch updates and the
     * exploration strategies, and yields the average-turns statistic. */
    int num_iterations = 0;
    
    /* store all actions (targets on dartboard) in actions array */
    get_actions(actions);
    
    /* Initialize all arrays to 0 except the policy, which should be assigned a random action for each state. */
    for (s = 0; s < NUM_STATES; s++) {
        pi_star[s] = rand() % NUM_ACTIONS;
        
        for (a = 0; a < NUM_ACTIONS; a++) {
            num_actions[s][a] = 0;
        }

        for (s_prime = 0; s_prime < NUM_STATES; s_prime++) {
            for (a = 0; a < NUM_ACTIONS; a++) {
                num_transitions[s][s_prime][a] = 0;
                T_matrix[s][s_prime][a] = 0;
            }
        }
    }

    /* play num_games games, updating policy after every EPOCH_SIZE number of throws */
    for (g = 1; g <= num_games; g++) {
      
    	/* run a single game */
      /* s is the remaining score; the game ends when it reaches 0. */
      for (s = START_SCORE; s > 0;) {
      
        int target_index;
        int to_explore;
        location loc;
    
    		num_iterations++;
    		
        /* The following two statements implement two exploration-exploitation
         * strategies. Comment out the strategy that you wish not to use.
         */
    		//to_explore = ex_strategy_one(num_iterations);
    		to_explore = ex_strategy_two(num_iterations, num_actions[s]);
    		
        if(to_explore) {
          /* explore */
          // Code for ex_strategy_one: randomly pick action
          //target_index = rand() % NUM_ACTIONS;
          
          // Code for ex_strategy_two: pick least-explored action
          int min_a = 0;
          for (int a=0; a<NUM_ACTIONS; a++) {
            if (num_actions[s][a] < num_actions[s][min_a])
              min_a = a;
          }
          target_index = min_a;
        } else {
          /* exploit */
          target_index = pi_star[s];
        }
        
        /* Get result of throw from dart thrower; update score if necessary */
        loc = throw(actions[target_index]); 
        /* A throw that would push the score below zero is a bust: the
         * score (state) is left unchanged for the next throw. */
        s_prime = s - location_to_score(loc);
        if (s_prime < 0)
            s_prime = s;
            
        /* Update experience:
         * increment number of times this action was taken in this state;
         * increment number of times we moved from this state to next state on this action. */

        num_actions[s][target_index]++;
        num_transitions[s][s_prime][target_index]++;

        /* Next state becomes current state */
        s = s_prime;

        /* Update our learned MDP and optimal policy after every EPOCH_SIZE throws, 
         * using infinite-horizon value iteration. 
         */
		
        if (num_iterations % epoch_size == 0) {
				
				/* Update transition probabilities: maximum-likelihood estimate
				 * T[i][j][k] = count(i,k,j) / count(i,k); entries for untried
				 * state-action pairs are left at 0. */
				for (i = 0; i < NUM_STATES; i++)
					for (j = 0; j < NUM_STATES; j++)
						for (k = 0; k < NUM_ACTIONS; k++)
							if (num_actions[i][k] != 0)
								T_matrix[i][j][k] = num_transitions[i][j][k] / num_actions[i][k];

				/* Update strategy (stored in pi) based on newly updated reward function and transition
				 * probabilities */
				modelbased_value_iteration(gamma, T_matrix, pi_star);
			
        }
      }
    }
    
	printf("NumberOfGames= %d , EpochSize= %d , AverageTurns = %f\n", num_games, epoch_size, (float)num_iterations/num_games);
}

/* A modified version of infinite-horizon value iteration from part 2.
 * Given the discount factor gamma and the learned transition model T_matrix,
 * repeatedly applies Bellman backups until every state value changes by less
 * than EPSILON_VI, storing the resulting greedy policy in pi_star.
 *
 * Fix: the convergence test used integer abs() on a float difference, which
 * truncates toward zero — any change smaller than 1.0 looked converged, so
 * iteration could stop far too early.  Use fabs() from <math.h> instead.
 * Also drops two unused locals (an iteration counter and a copied action). */
void modelbased_value_iteration(float gamma, float T_matrix[][NUM_STATES][NUM_ACTIONS], int *pi_star) {
    float V[2][NUM_STATES];   /* V[0] = previous iterate, V[1] = current */
    int s, a, s_prime;
    int converging = 0;
    location actions[NUM_ACTIONS];

    /* Get all possible actions */
    get_actions(actions);

    /* initialize v */
    for (s = 0; s < NUM_STATES; s++)
        V[0][s] = 0;

    /* iterate until all state values (v[s]) converge */
    while (!converging) {
        for (s = 0; s < NUM_STATES; s++) {
            float Q[NUM_ACTIONS];

            for (a = 0; a < NUM_ACTIONS; a++) {
                /* Q(s,a) = R(s,a) + gamma * sum_{s'} T(s,s',a) * V(s') */
                Q[a] = R(s, actions[a]);
                for (s_prime = 0; s_prime < NUM_STATES; s_prime++)
                    Q[a] += gamma * T_matrix[s][s_prime][a] * V[0][s_prime];

                /* Track the maximizing action and the maximum Q value.
                 * a == 0 seeds V[1][s] before it is ever read. */
                if (a == 0 || Q[a] > V[1][s]) {
                    pi_star[s] = a;
                    V[1][s] = Q[a];
                }
            }
        }

        /* Values for iteration k become the values for iteration k-1;
         * one non-converged component keeps the loop going. */
        converging = 1;
        for (s = 0; s < NUM_STATES; s++) {
            if (EPSILON_VI < fabs(V[0][s] - V[1][s]))
                converging = 0;

            V[0][s] = V[1][s];
        }
    }
}

