
#include <cstdlib>
#include <iostream>

#include "Agent.hpp"

using namespace cv;
using namespace std;

class Environment{

public:

    Environment()
    {
        init();
    }

    void init()
    {
        // 随机初始化

        // 0. 初始化随机种子
        init_random_seed();

        // 1. 初始化freespace
        for(int x=0;x<MAP_W;x++)
            for(int y=0;y<MAP_H;y++)
                map[x][y] = 0;
        // 2. 初始化障碍物
        for(int x=0;x<MAP_W;x++)
            for(int y=0;y<MAP_H;y++)
                if(Rejector(obstacle_density))
                    map[x][y] = 1;
        // 3. 初始化目标位置
        for(;;)
        {
            int ox = MAP_W*(rand()/(double(RAND_MAX)));
            int oy = MAP_H*(rand()/(double(RAND_MAX)));
            if(map[ox][oy]==0)
            {
                map[ox][oy] = 2;
                goal.x = ox;
                goal.y = oy;
                break;
            }
        }
        // 4. 初始化 Agent 位置
        for(;;)
        {
            int ox = MAP_W*(rand()/(double(RAND_MAX)));
            int oy = MAP_H*(rand()/(double(RAND_MAX)));
            if(map[ox][oy]==0 && abs(goal.x-ox) >= MAP_W/3 && abs(goal.y-oy) >= MAP_H/3 )
            {
                smart_agent.state.x = ox;
                smart_agent.state.y = oy;
                smart_agent.state.s = 0;
                break;
            }
        }
        start = smart_agent.state;

        // 5. 初始化路径点位置
        for(;;)
        {
            int ox = MAP_W*(rand()/(double(RAND_MAX)));
            int oy = MAP_H*(rand()/(double(RAND_MAX)));
            if(map[ox][oy]==0)
            {
                //map[ox][oy] = 3;
                WP.x = ox;
                WP.y = oy;
                break;
            }
        }

        // 6. 初始化 Agent Policy
        for(int x=0;x<MAP_W;x++)
            for(int y=0;y<MAP_H;y++)
            {
                smart_agent.Policy[x][y].randomize();
                smart_agent.Policy_start_to_waypoint[x][y].randomize();
                smart_agent.Policy_waypoint_to_goal[x][y].randomize();
            }

        std::cout<<"environment initialized"<<std::endl;
    }

    void step()
    {
        // 1. 采样一个 action
        int action = smart_agent.take_action_epsilon_greedy();
        // 2. State Transition : 根据 action 计算 middle_state
        State middle_state;
        if(action == ACTION_UP)
        {
            if(smart_agent.state.y == 0)
            {
                middle_state = smart_agent.state;
                middle_state.s = -1;                // 出界
            }
            else
            {
                middle_state = smart_agent.state;
                middle_state.y -= 1;
                middle_state.s = map[middle_state.x][middle_state.y];
            }            
        }
        else if(action == ACTION_LEFT)
        {
            if(smart_agent.state.x == 0)
            {
                middle_state = smart_agent.state;
                middle_state.s = -1;                // 出界
            }
            else
            {
                middle_state = smart_agent.state;
                middle_state.x -= 1;
                middle_state.s = map[middle_state.x][middle_state.y];
            }            
        }
        else if(action == ACTION_DOWN)
        {
            if(smart_agent.state.y == MAP_H-1)
            {
                middle_state = smart_agent.state;
                middle_state.s = -1;                // 出界
            }
            else
            {
                middle_state = smart_agent.state;
                middle_state.y += 1;
                middle_state.s = map[middle_state.x][middle_state.y];
            }
        }
        else if(action == ACTION_RIGHT)
        {
            if(smart_agent.state.x == MAP_W-1)
            {
                middle_state = smart_agent.state;
                middle_state.s = -1;                // 出界
            }
            else
            {
                middle_state = smart_agent.state;
                middle_state.x += 1;
                middle_state.s = map[middle_state.x][middle_state.y];
            }
        }

        // 3. 根据 state 转移计算 Agent 的 rm_state 状态转移(直接更新到Agent)
        if(smart_agent.agent_rm_state == RMState::start_to_waypoint
            && middle_state.x == WP.x
            && middle_state.y == WP.y)
        {
            smart_agent.agent_rm_state = RMState::waypoint_to_goal;
        }
        else if(smart_agent.agent_rm_state == RMState::waypoint_to_goal
            && middle_state.s == 2)
        {
            smart_agent.agent_rm_state = RMState::goal;
        }

        // 4. 根据 middle_state 以及当前 Agent 的 rm_state 计算 reward
        double R = simple_reward_machine(middle_state);

        // 5. 决定最终状态
        finalize(middle_state);

        // 6. 更新 Agent， 决定是否结束本轮训练
        smart_agent.update(action,middle_state,R);

        if(R < 0){
            // 撞墙，reset
            smart_agent.reset(start,false);
        }
        if(smart_agent.agent_rm_state == RMState::goal){
            // 完成目标，reset
            smart_agent.reset(start,true);
        }
    }

    double simple_reward_machine(State &middle_state)
    {
        // 1. 出界：-1
        // 2. 踩障碍物：-0.1
        // 3. goal：1

        // 训练阶段，根据 rm_state 以及 middle_state 返回相应的 reward

        RMState rm_state = smart_agent.agent_rm_state;   // 查询当前的RM状态

        if(middle_state.s == -1){
            return -1;              // 所有阶段的共有 reward function
        }
        else if(middle_state.s == 1){
            return -1;              // 所有阶段的共有 reward function
        }
        else if(rm_state == RMState::start_to_waypoint && middle_state.x == WP.x && middle_state.y == WP.y)
        {
            // return 0.5;  // reward shaping 可以抛弃，直接返回 0
            return 0;
        }
        else if(rm_state == RMState::waypoint_to_goal && middle_state.s == 2){

            return 1;
        }
        else return 0;
    }

    void finalize(State &middle_state)
    {
        if(middle_state.s == 1){
            middle_state = smart_agent.state;
        }
    }

public:

    // 0 - free space
    // 1 - obstacle
    // 2 - goal
    // 3~7 - waypoints
    int map[MAP_W][MAP_H];

    double obstacle_density = 0.1;
    
    State start;
    State goal;
    State WP;
    Agent smart_agent;

};