#include "utils.hpp"

using namespace cv;
using namespace std;

// Grid-world state: a cell position plus an auxiliary flag.
struct State{
    int x = 0;  // column index into the policy tables (0..MAP_W-1)
    int y = 0;  // row index into the policy tables (0..MAP_H-1)
    int s = 0;  // extra state field; unused in this file -- TODO confirm its meaning against callers
};

// Reward-machine (RM) phases of the two-stage task:
// reach the waypoint first, then travel from the waypoint to the goal.
enum RMState
{
    start_to_waypoint,  // phase 1: heading from the start toward the waypoint
    waypoint_to_goal,   // phase 2: heading from the waypoint toward the goal
    goal                // terminal phase (goal reached); no policy is attached to it
};

// Per-cell action-value vector: one value for each of the four actions,
// indexed by ACTION_UP/ACTION_LEFT/ACTION_DOWN/ACTION_RIGHT.
class Pi{
public:

    Pi() = default;  // Rule of Zero: the in-class initializer below is sufficient

    // Re-seeds the four values with uniform draws in [0, 1] and rescales the
    // vector to unit Euclidean (L2) length. Uses the global rand() state, so
    // call srand() beforehand for reproducibility.
    void randomize()
    {
        // 1. Draw four uniform samples in [0, 1].
        for (double &v : pi)
            v = rand() / double(RAND_MAX);
        // 2. Normalize to unit L2 norm. Guard against the (vanishingly
        //    unlikely) all-zero draw, which would otherwise divide by zero.
        double l = std::sqrt(pi[0]*pi[0] + pi[1]*pi[1] + pi[2]*pi[2] + pi[3]*pi[3]);
        if (l > 0.0)
            for (double &v : pi)
                v /= l;
    }

    // Action values; starts uniform before any learning has happened.
    double pi[4] = {0.25, 0.25, 0.25, 0.25};
};

class Agent{

public:

    Agent()
    {
        agent_rm_state = RMState::start_to_waypoint;
    }

    void reset(State &init_state, bool success = false)
    {
        state = init_state;
        if(success) success_counter ++;
        else failure_counter ++;

        // 重置 RM 状态
        agent_rm_state = RMState::start_to_waypoint;
    }

    int take_action_epsilon_greedy()
    {
        // std::cout<<"epsilon : "<<epsilon<<std::endl;

        bool random_ = Rejector(epsilon);
        // GLIE
        if(epsilon>epsilon_min)
        {
            epsilon -= (e_decay/annealingSteps);
        }

        // 根据当前的RM状态，调用相应阶段的策略
        if(agent_rm_state == RMState::start_to_waypoint)
        {
            if(random_)
            {
                // 取随机 action
                double a = (rand()/(double(RAND_MAX)));
                return int(a*4);
            }
            else
            {
                // 取最大 action
                double max_q = max(Policy_start_to_waypoint[state.x][state.y].pi[0],Policy_start_to_waypoint[state.x][state.y].pi[1]);
                max_q = max(max_q,Policy_start_to_waypoint[state.x][state.y].pi[2]);
                max_q = max(max_q,Policy_start_to_waypoint[state.x][state.y].pi[3]);
                if(max_q == Policy_start_to_waypoint[state.x][state.y].pi[0])return ACTION_UP;
                if(max_q == Policy_start_to_waypoint[state.x][state.y].pi[1])return ACTION_LEFT;
                if(max_q == Policy_start_to_waypoint[state.x][state.y].pi[2])return ACTION_DOWN;
                if(max_q == Policy_start_to_waypoint[state.x][state.y].pi[3])return ACTION_RIGHT;
            }
        }
        else if(agent_rm_state == RMState::waypoint_to_goal)
        {
            cout<<"success_rate : "<<success_rate<<" RMState::waypoint_to_goal"<<std::endl;
            if(random_)
            {
                // 取随机 action
                double a = (rand()/(double(RAND_MAX)));
                return int(a*4);
            }
            else
            {
                // 取最大 action
                double max_q = max(Policy_waypoint_to_goal[state.x][state.y].pi[0],Policy_waypoint_to_goal[state.x][state.y].pi[1]);
                max_q = max(max_q,Policy_waypoint_to_goal[state.x][state.y].pi[2]);
                max_q = max(max_q,Policy_waypoint_to_goal[state.x][state.y].pi[3]);
                if(max_q == Policy_waypoint_to_goal[state.x][state.y].pi[0])return ACTION_UP;
                if(max_q == Policy_waypoint_to_goal[state.x][state.y].pi[1])return ACTION_LEFT;
                if(max_q == Policy_waypoint_to_goal[state.x][state.y].pi[2])return ACTION_DOWN;
                if(max_q == Policy_waypoint_to_goal[state.x][state.y].pi[3])return ACTION_RIGHT;
            }
        }
        
    }

    void update(int action, State &new_state, double reward)
    {
        
        // Q-update
        // 根据当前的 RM 状态，更新相应的策略
        // 在 RM 状态转移瞬间，根据QRM更新原则，更新上一个RM状态的策略

        if(reward >= 0)
        {
            // 更新策略价值，并更新状态
            // 1. 两个RM状态之间的转移状态
            if(agent_last_rm_state == RMState::start_to_waypoint && agent_rm_state == RMState::waypoint_to_goal)
            {
                double max_Q_s_new = max(Policy_waypoint_to_goal[new_state.x][new_state.y].pi[0],Policy_waypoint_to_goal[new_state.x][new_state.y].pi[1]);
                max_Q_s_new = max(max_Q_s_new,Policy_waypoint_to_goal[new_state.x][new_state.y].pi[2]);
                max_Q_s_new = max(max_Q_s_new,Policy_waypoint_to_goal[new_state.x][new_state.y].pi[3]);

                Policy_start_to_waypoint[state.x][state.y].pi[action] = alpha*(reward+gamma*max_Q_s_new)+(1-alpha)*Policy_start_to_waypoint[state.x][state.y].pi[action];
            }
            // 2. 第一个 RM 状态
            else if(agent_rm_state == RMState::start_to_waypoint)
            {
                double max_Q_s_new = max(Policy_start_to_waypoint[new_state.x][new_state.y].pi[0],Policy_start_to_waypoint[new_state.x][new_state.y].pi[1]);
                max_Q_s_new = max(max_Q_s_new,Policy_start_to_waypoint[new_state.x][new_state.y].pi[2]);
                max_Q_s_new = max(max_Q_s_new,Policy_start_to_waypoint[new_state.x][new_state.y].pi[3]);

                Policy_start_to_waypoint[state.x][state.y].pi[action] = alpha*(reward+gamma*max_Q_s_new)+(1-alpha)*Policy_start_to_waypoint[state.x][state.y].pi[action];
            }
            // 3. 第二个 RM 状态
            else if(agent_rm_state == RMState::waypoint_to_goal)
            {
                double max_Q_s_new = max(Policy_waypoint_to_goal[new_state.x][new_state.y].pi[0],Policy_waypoint_to_goal[new_state.x][new_state.y].pi[1]);
                max_Q_s_new = max(max_Q_s_new,Policy_waypoint_to_goal[new_state.x][new_state.y].pi[2]);
                max_Q_s_new = max(max_Q_s_new,Policy_waypoint_to_goal[new_state.x][new_state.y].pi[3]);

                Policy_waypoint_to_goal[state.x][state.y].pi[action] = alpha*(reward+gamma*max_Q_s_new)+(1-alpha)*Policy_waypoint_to_goal[state.x][state.y].pi[action];
            }

            state = new_state;
        }
        else
        {
            // 只更新策略价值
            // 2. 第一个 RM 状态
            if(agent_rm_state == RMState::start_to_waypoint)
            {
                double max_Q_s_new = max(Policy_start_to_waypoint[new_state.x][new_state.y].pi[0],Policy_start_to_waypoint[new_state.x][new_state.y].pi[1]);
                max_Q_s_new = max(max_Q_s_new,Policy_start_to_waypoint[new_state.x][new_state.y].pi[2]);
                max_Q_s_new = max(max_Q_s_new,Policy_start_to_waypoint[new_state.x][new_state.y].pi[3]);

                Policy_start_to_waypoint[state.x][state.y].pi[action] = alpha*(reward+gamma*max_Q_s_new)+(1-alpha)*Policy_start_to_waypoint[state.x][state.y].pi[action];
            }
            // 3. 第二个 RM 状态
            else if(agent_rm_state == RMState::waypoint_to_goal)
            {
                double max_Q_s_new = max(Policy_waypoint_to_goal[new_state.x][new_state.y].pi[0],Policy_waypoint_to_goal[new_state.x][new_state.y].pi[1]);
                max_Q_s_new = max(max_Q_s_new,Policy_waypoint_to_goal[new_state.x][new_state.y].pi[2]);
                max_Q_s_new = max(max_Q_s_new,Policy_waypoint_to_goal[new_state.x][new_state.y].pi[3]);

                Policy_waypoint_to_goal[state.x][state.y].pi[action] = alpha*(reward+gamma*max_Q_s_new)+(1-alpha)*Policy_waypoint_to_goal[state.x][state.y].pi[action];
            }
        }

        agent_last_rm_state = agent_rm_state;

        if(success_counter+failure_counter>=100)
        {
            if(success_counter>0.5)success_counter-=0.5;
            if(failure_counter>0.5)failure_counter-=0.5;
        }
        success_rate = success_counter/(failure_counter+success_counter);
    }

public:

    Pi Policy[MAP_W][MAP_H];

    Pi Policy_start_to_waypoint[MAP_W][MAP_H];
    Pi Policy_waypoint_to_goal[MAP_W][MAP_H];

    State state;

    double alpha = 0.5;
    double gamma = 0.9;
    double epsilon = 1;
    double epsilon_min = 0.1;
    double e_decay = epsilon - epsilon_min;
    double annealingSteps = 20000;

    double success_counter = 1;
    double failure_counter = 1;
    double success_rate = 0;

    RMState agent_rm_state;
    RMState agent_last_rm_state;
};
