/* 
 * File:   OnlineBAPOMDP.h
 * Author: Hadrien
 *
 * Created on 11 juillet 2012, 14:59
 */

#ifndef ONLINEBAPOMDP_H
#define	ONLINEBAPOMDP_H
#include <float.h>
#include <string>
#include <vector>

/**
 * Online, depth-limited expectimax planner for a Bayes-Adaptive POMDP.
 *
 * Template parameters:
 *   APPROXIMATEBELIEFUPDATE - functor type performing the (approximate) belief
 *       update tau(b, a, z, K); must expose the nested typedef `belief_t` and
 *       be callable as const.
 *   REWARDFUNCTION - functor type mapping (state, action) to a double reward;
 *       must be callable as const.
 *
 * The planner recursively expands every action / observation pair down to a
 * fixed depth, weighting child values by observation probability and the
 * discount factor `gamma` (Bellman backup), and reports the maximizing root
 * action through the out-parameter of operator().
 */
template<typename APPROXIMATEBELIEFUPDATE, typename REWARDFUNCTION>
class OnlineBAPOMDP {
public:
    typedef APPROXIMATEBELIEFUPDATE app_u_belief_t;
    typedef REWARDFUNCTION reward_function_t;
    typedef typename app_u_belief_t::belief_t belief_t;
    typedef typename belief_t::super_state_t super_state_t;
    typedef typename super_state_t::state_t state_t;
    typedef typename super_state_t::obs_t obs_t;
    typedef typename super_state_t::action_t action_t;

    OnlineBAPOMDP(){
        gamma = 0.95; // default discount factor
    }
    virtual ~OnlineBAPOMDP(){
    }

    /**
     * Depth-limited expectimax search from belief `b`.
     *
     * @param b     current belief over super-states.
     * @param d     remaining search depth (d == 0 means leaf).
     * @param maxD  total search depth; when d == maxD this is the root call
     *              and the best action found is written back into `a`.
     * @param K     particle/sample budget forwarded to the belief update tau.
     * @param a     out-parameter: receives the maximizing action at the root.
     * @return      the estimated value of belief `b` at depth d.
     */
    double operator()(const belief_t& b, const int& d, const int& maxD, const int& K, action_t& a) const{
        // Indentation prefix for the (commented-out) debug tree output.
        // NOTE: std::string replaces the original `char tab[5*(maxD-d)+1]`,
        // which was a variable-length array -- a GCC extension, not standard C++.
        std::string tab(5*(maxD-d), ' ');
        (void)tab; // only referenced by the debug statements below

        if (d==0) {
            // Leaf node: value is the best one-step immediate reward.
            action_t a_best = a;
            double r = maxImmediateReward(b, a_best);
            // FIX: in the degenerate maxD == 0 case this leaf IS the root,
            // so the chosen action must still be reported to the caller
            // (previously `a` was left untouched).
            if (d==maxD)
                a = a_best;
            //std::cout << tab << "child{\n";
            //std::cout << tab << "  node (Leaf"<< rand() <<") [leaf] {$A=" << a_best.index() << ", R=" << r << "$"<< b <<"}\n"<< tab <<"}\n";
            return r;
        }

        double max_reward = -DBL_MAX;
        // FIX: `maxA` was uninitialized; reading it at the root was undefined
        // behavior if no action ever improved on -DBL_MAX.
        int maxA = 0;
        for(int a_index = 0; a_index < action_t::size; a_index++){
            action_t act(a_index);
            // Bellman backup: immediate reward plus discounted expectation
            // over observations of the child-belief values.
            double reward = immediateReward(b,act);

            //std::cout << tab << "child{\n";
            //std::cout << tab << "  node (D" << d << "Act" << a_index << rand() << ") [reward] {$" << reward << "$}\n";

            for(int z_index = 0; z_index < obs_t::size; z_index++){
                double prob = nextObsProb(b, a_index, z_index);

                //std::cout << tab << "  child{\n";
                //std::cout << tab << "    node (D" << d << "Obs" << z_index << rand() <<") [obs] {$" << prob << "$}\n";

                // Skip unreachable observation branches (prob == 0): they
                // contribute nothing and tau may be undefined for them.
                if(prob>0) {
                    obs_t z(z_index);
                    belief_t newBelief = tau(b,act,z,K);
                    reward += gamma * prob * (*this)(newBelief, d-1, maxD, K, act);
                }
                //std::cout << tab << "    edge from parent node[obs_lbl] {"<< ((z_index==0)?"Hear Left":"Hear Right") <<"}\n";
                //std::cout << tab << "  }\n";
            }
            //std::cout << "-> depth " << maxD-d << " , a= " << a_index << " , r= " << reward << std::endl;
            if(reward>max_reward){
                max_reward = reward;
                maxA = a_index;
            }
            //std::cout << tab << "  edge from parent node[act_lbl] {"<< ((a_index<2)?((a_index==0)?"Left":"Right"):"Listen") <<"}";
            //std::cout << " node[reward_lbl] {"<< reward <<"}\n";
            //std::cout << tab << "}\n";
        }
        //std::cout << "     depth " << maxD-d << " , MAXa= " << maxA << " , MAXr= " << max_reward << std::endl;
        // Only the root call reports the maximizing action to the caller.
        if(d==maxD){
            action_t bestA(maxA);
            a = bestA;
        }
        return max_reward;
    }

private:
    /**
     * P(z | b, a): probability of observing `z` after taking action `a`
     * in belief `b`, marginalized over current and successor states:
     *   sum_s sum_s' b(s) T(s,a,s') O(s',a,z)
     */
    double nextObsProb(const belief_t& b, const int& a, const int& z) const{
        std::vector<super_state_t> Sb = b.getStates();
        double prob = 0;
        for(typename std::vector<super_state_t>::const_iterator it = Sb.begin(); it != Sb.end(); ++it) {
            int s = it->getState().index();
            for(int ss = 0; ss < state_t::size; ss++) {
                prob += b.get(*it)*it->getT(s,a,ss)*it->getO(ss,a,z);
            }
        }
        return prob;
    }

    /**
     * Expected immediate reward of action `a` under belief `b`:
     *   sum_s b(s) R(s, a)
     */
    double immediateReward(const belief_t& b, const action_t& a) const{
        std::vector<super_state_t> Sb = b.getStates();
        double reward = 0;
        for(typename std::vector<super_state_t>::const_iterator it = Sb.begin(); it != Sb.end(); ++it) {
            reward += b.get(*it)*reward_function(it->getState(), a);
        }
        return reward;
    }

    /**
     * Best one-step reward achievable from belief `b`.
     * @param a out-parameter: receives the maximizing action.
     * @return  max over actions of the expected immediate reward.
     */
    double maxImmediateReward(const belief_t& b, action_t& a) const{
        double max_reward = -DBL_MAX;
        for(int a_index = 0; a_index < action_t::size; a_index++){
            action_t act(a_index);
            double r = immediateReward(b,act);
            if(r>max_reward) {
                max_reward = r;
                a = act;
            }
            //std::cout << "->  Leaf, a= " << a_index << " , r= " << r << std::endl;
        }
        return max_reward;
    }

    reward_function_t reward_function;  // R(s, a)
    app_u_belief_t tau;                 // approximate belief-update functor
    double gamma;                       // discount factor
};

#endif	/* ONLINEBAPOMDP_H */

