
#include <iostream>
#include "Belief.h"
#include "ApproximateBeliefUpdate.h"
#include "ExactBeliefUpdate.h"
#include "follow/FollowSimulator.h"
#include "follow/FollowSuperState.h"
#include "follow/FollowReward.h"
#include "OnlineBAPOMDP.h"
#include <stdlib.h>
#include <time.h>

// Type aliases wiring the generic BAPOMDP machinery to the Follow domain.
// Modern alias declarations (`using`) instead of legacy `typedef`.
using simulator_t     = FollowSimulator;                 // ground-truth environment
using super_state_t   = FollowSuperState;                // domain (hyper)state
using action_t        = super_state_t::action_t;
using state_t         = super_state_t::state_t;
using obs_t           = super_state_t::obs_t;
using belief_t        = Belief<super_state_t>;           // distribution over super-states

using u_belief_t      = ExactBeliefUpdate<belief_t>;         // exact Bayes update
using app_u_belief_t  = ApproximateBeliefUpdate<u_belief_t>; // sampled approximation
using rl_t            = OnlineBAPOMDP<app_u_belief_t, FollowReward>; // online planner

using namespace std;
int main(int argc, char *argv[]) {
    srand ( time(NULL) );
    //cout << "Hello!" << endl;
    simulator_t sim;
    rl_t rl;
    app_u_belief_t tau;
    //Initial belief
    belief_t b;
    state_t s1(0);
    state_t s2(1);
    super_state_t ss1(s1, 2, 3, 1, 2, 2, 2, 1, 3, 2, 2);
    super_state_t ss2(s2, 2, 3, 1, 2, 2, 2, 1, 3, 2, 2);
    b.set(ss1, 0.5);
    b.set(ss2, 0.5);
    
    //Parameters
    int d=3;
    int K=2;
    int it = 0;
    
    while(true) {  
        cout << "=== Iteration: " << it++ << " ===" << endl;
        action_t a;
        double q = rl(b,d,d,K,a); //Take the best next action and return the expect reward
        
        //Output actions
        if (a.getValue() == (int)FollowAction::East)
            cout << "Go East" << ", Expected long term reward: " << q << endl;
        else if (a.getValue() == (int)FollowAction::West)
            cout << "Go West" << ", Expected long term reward: " << q << endl;
        else if (a.getValue() == (int)FollowAction::North)
            cout << "Go North" << ", Expected long term reward: " << q << endl;
        else if (a.getValue() == (int)FollowAction::South)
            cout << "Go South" << ", Expected long term reward: " << q << endl;
        else
            cout << "No Move" << ", Expected long term reward: " << q << endl;
        double reward = sim.control(a); //The reward function is known so this is useless
        cout << "Reward Received: " << reward << endl;
        //cout << reward << endl;
        obs_t z = sim.sense();
        if (z.getValue() == (int)FollowObservation::East)
            cout << "Seen in the East" << endl;
        else if (z.getValue() == (int)FollowObservation::West)
            cout << "Seen in the West" << endl;
        else if (z.getValue() == (int)FollowObservation::North)
            cout << "Seen in the North" << endl;
        else if (z.getValue() == (int)FollowObservation::South)
            cout << "Seen in the South" << endl;
        else
            cout << "Unseen" << endl;
        
        if(z.isFinal()) {//If the final state is reach
            cout << "Terminated" << endl;
            break;
        } else {
            b = tau(b,a,z,K);
        }
    }
    return 0;
}
