public class Sarsa {

    /**
     * Implement Sarsa with greedy action selection, α=0.1, and optimistic
     * initialization between 6 and 7 as suggested above. Run for a number
     * of episodes and observe the returns, which should increase over
     * episodes as learning progresses up to about 5 or 6. Print out the
     * final policy after things seem to have stabilized. Create this
     * code by modifying the provided file Sarsa.java. Do not change the
     * filename or class name. Do not make any other files; put all your
     * code in this one file.
     */

    /** Step-size parameter α for the Sarsa update. */
    final static double alpha = 0.1;
    /** Number of episodes per independent run. */
    final static int nepisodes = 10000;
    /** Number of independent runs; episode returns are averaged across runs. */
    final static int nruns = 1;

    /** Print the greedy policy at the end of each run. */
    final static boolean PRINT_POLICY = true;
    /** Print "episodeNumber,avgReturn" lines after all runs finish. */
    final static boolean PRINT_EPISODE_AVGS = false;

    final static int NUM_STATES = 10;
    final static int NUM_ACTIONS = 3;
    final static int TERMINAL_STATE = -1;

    static double[][] q; // action-value estimates Q(S,A)

    // episodeAvg[i] = average return of episode i over the runs completed so far
    static double[] episodeAvg;

    private final static String[] stateNames = {
                                                "RU_8p",
                                                "TU_10p",
                                                "RU_10p",
                                                "RD_10p",
                                                "RU_8a",
                                                "RD_8a",
                                                "TU_10a",
                                                "RU_10a",
                                                "RD_10a",
                                                "TD_10a" };

    private final static String[]  actionNames= { "P", "R", "S" };

    /**
     * Runs Sarsa on problem for 'nepisodes' episodes, and repeats
     * 'nruns' times independently.
     */
    public static void main(String[] arg) {

        episodeAvg = new double[nepisodes]; // array for storing episode return avgs

        // do 'nruns' independent runs of 'nepisodes' episodes each
        for ( int run = 0; run < nruns; run++ ) {

            // initialize q(s,a) optimistically to a random double between
            // 6 and 7 for all s, a — encourages early exploration even
            // under pure greedy action selection
            q = new double[NUM_STATES][NUM_ACTIONS];

            for ( int s = 0; s < NUM_STATES; s++ ) {
                for ( int a = 0; a < NUM_ACTIONS; a++ ) {

                    q[s][a] = 6 + Math.random();

                }
            } // init q(s,a)

            // run a number of episodes...
            for ( int epi = 0; epi < nepisodes; epi++ ) {

                double ret = 0.0d; // undiscounted return accumulated this episode

                // init state
                int state = Party.init();

                // Choose greedy action from state using Q
                int action = chooseGreedyAction( state );

                // Repeat for each step until terminal state:
                while ( state != TERMINAL_STATE ) {

                    // Take action, observe reward and nextState
                    int nextState = Party.transition( state, action );
                    double reward = Party.reward( state, action, nextState );

                    // Choose greedy nextAction from nextState
                    int nextAction = chooseGreedyAction( nextState );

                    // Sarsa update (episodic task, so γ = 1):
                    // Q(S,A) ← Q(S,A) + α [ R + Q(S',A') − Q(S,A) ]
                    q[state][action] = qValue(state, action)
                            + alpha * ( reward + qValue(nextState, nextAction)
                                               - qValue(state, action) );

                    state = nextState;
                    action = nextAction;
                    ret += reward;

                } // until state is terminal

                // incremental average of this episode's return across runs:
                // avg_k = avg_{k-1} + (ret - avg_{k-1}) / k, with k = run + 1
                episodeAvg[epi] = episodeAvg[epi]
                                + ((ret - episodeAvg[epi]) / ((double) run + 1 ));

            } // do nepisodes episodes

            // print out the final (stabilized) policy for this run
            if (PRINT_POLICY) {
                System.out.format("Run #%d:\n", run + 1 );
                printPolicy();
            }

        } // do nruns runs

        if (PRINT_EPISODE_AVGS) {
            printEpisodeAvgs();
        }


    } // main

    /**
     * Return highest valued action available from state given q.
     * Ties are broken in favor of the lowest-numbered action.
     * Returns action 0 for the terminal state (its value is 0 anyway).
     */
    private static int chooseGreedyAction( int state ) {

        if ( state == TERMINAL_STATE ) {
            return 0;
        }

        int greedyAction = 0;
        double bestValue = qValue( state, 0 );

        // only consider the actions actually available from this state
        for ( int a = 0; a < Party.numActions(state); a++ ) {

            if ( qValue(state, a) > bestValue ) {
                bestValue = qValue(state, a);
                greedyAction = a;
            }

        }

        return greedyAction;

    } // chooseGreedyAction

    /**
     * Return Q( state, action ). Attributes value of 0 to any
     * action from terminal state, so the update rule needs no
     * special-casing at episode end.
     */
    private static double qValue( int state, int action ) {

        double value = 0;

        if ( state == TERMINAL_STATE ) {
            value = 0;

        } else {
            value = q[state][action];

        }

        return value;

    } // qValue

    /**
     * Prints out policy: for each state, the greedy action under current Q.
     */
    private static void printPolicy() {

        // Policy is greedy, so for each state, print action with
        // highest Q(s,a)
        for ( int state = 0; state < NUM_STATES; state++ ) {

            int action = chooseGreedyAction( state );

            System.out.format("\tState %s: %s\n",
                        stateNames[state],
                        actionNames[action] );

        }

    } // printPolicy

    /**
     * Prints out episode return avgs by number of episodes.
     * Each output line is "episodeNumber,avgReturn"
     */
    private static void printEpisodeAvgs() {

        for ( int epi = 0; epi < nepisodes; epi++ ) {

            System.out.format("%d,%f\n", epi + 1, episodeAvg[epi] );

        }

    } // printEpisodeAvgs

} // class
