#include "Agent.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <algorithm>

/// Construct an agent with its learning hyper-parameters.
/// @param epsilonInput  exploration/exploitation threshold used in getAction
/// @param alphaInput    learning rate for Q-value updates
/// @param gammaInput    discount factor for future rewards
/// @param numStates     currently unused -- the Q-table grows lazily as states
///                      are first seen; NOTE(review): confirm it is still needed
/// @param numActionsInput  number of actions available per state
Agent::Agent(double epsilonInput, double alphaInput, double gammaInput, int numStates, int numActionsInput)
    : epsilon(epsilonInput),
      alpha(alphaInput),
      gamma(gammaInput),
      numActions(numActionsInput)
{
}

/// Epsilon-style action selection for the given state.
/// Lazily initialises the Q-row for newly seen states, then chooses either a
/// random action or the greedy (highest-Q) action, and finally clamps the
/// choice to the legal moves in possibleActions.
/// @param state            current state (pointer identity is the Q-table key)
/// @param possibleActions  indices of the actions that are legal right now;
///                         assumed non-empty -- TODO confirm with callers
/// @return an action index guaranteed to be in possibleActions
int Agent::getAction(State* state, vector<int> possibleActions)
{
    //If the state is not in the map, add it and seed every action with a
    //small random Q-value in [0, 1] so early ties break arbitrarily.
    if(QTable.count(state) == 0)
    {
        vector<pair<Actions, double> > newActions;
        for(int i = 0; i < numActions; i++)
        {
            newActions.push_back(make_pair((Actions)i, (double)rand()/(double)RAND_MAX));
        }
        QTable.insert(make_pair(state, newActions));
        QTable2.insert(make_pair(state, 1.0));
    }

    int action = 0;
    //Uniform draw in (0, 1].
    double random = double((rand() % 100 + 1))/100;
    //NOTE(review): with probability (1 - epsilon) a RANDOM action is chosen and
    //the greedy action only with probability epsilon -- inverted relative to
    //the usual epsilon-greedy convention. Behaviour kept as-is; verify the
    //intended meaning of epsilon against the caller.
    if(random <= (1 - epsilon))
    {
        action = rand() % numActions;
    }
    else
    {
        //Greedy branch: index of the highest Q-value (>= means the last
        //maximum wins on ties). Cache the row to avoid repeated map lookups.
        vector<pair<Actions, double> >& qRow = QTable[state];
        double bestQ = qRow[0].second;
        for(int i = 1; i < numActions; i++)
        {
            if(qRow[i].second >= bestQ)
            {
                bestQ = qRow[i].second;
                action = i;
            }
        }
    }

    //If the chosen action is not legal in this state, fall back to a random
    //legal one.
    if(std::find(possibleActions.begin(), possibleActions.end(), action) != possibleActions.end())
    {
        return action;
    }
    return possibleActions[rand() % possibleActions.size()];
}

/// Pure greedy action selection (no exploration).
/// @param state            current state (pointer identity is the Q-table key)
/// @param possibleActions  indices of the actions that are legal right now;
///                         assumed non-empty -- TODO confirm with callers
/// @return the legal action with the highest Q-value, or a random legal
///         action if the greedy pick is illegal or the state is unknown
int Agent::greedyGetAction(State* state, vector<int> possibleActions)
{
    //Fixed: the original indexed QTable[state][0] unconditionally. For an
    //unseen state operator[] default-constructs an EMPTY row and element 0 is
    //out of bounds (undefined behaviour). Fall back to a random legal action.
    if(QTable.count(state) == 0 || QTable[state].empty())
    {
        return possibleActions[rand() % possibleActions.size()];
    }

    //Index of the highest Q-value (>= means the last maximum wins on ties).
    vector<pair<Actions, double> >& qRow = QTable[state];
    int action = 0;
    double bestQ = qRow[0].second;
    for(int i = 1; i < numActions; i++)
    {
        if(qRow[i].second >= bestQ)
        {
            bestQ = qRow[i].second;
            action = i;
        }
    }

    //Clamp the greedy choice to the legal moves.
    if(std::find(possibleActions.begin(), possibleActions.end(), action) != possibleActions.end())
    {
        return action;
    }
    return possibleActions[rand() % possibleActions.size()];
}

/// One-step Q-learning update:
///   Q(s,a) += alpha * (reward + gamma * max_a' Q(s',a') - Q(s,a))
/// Silently returns if either state has no initialised Q-row, or if
/// prevAction is out of range for the previous state's row.
/// @param prevState   state the action was taken in (s)
/// @param prevAction  action index taken in prevState (a)
/// @param state       resulting state (s')
/// @param reward      immediate reward observed for (s, a)
void Agent::updateReward(State* prevState, int prevAction, State* state, double reward)
{
    //Both rows must already exist and be non-empty; otherwise there is
    //nothing coherent to update (count-first avoids operator[] inserting).
    if(QTable.count(prevState) == 0 || QTable[prevState].empty()) return;
    if(QTable.count(state) == 0 || QTable[state].empty()) return;
    //Guard added: an out-of-range prevAction would index past the row.
    if(prevAction < 0 || prevAction >= (int)QTable[prevState].size()) return;

    //Max future value over all actions in the new state. The original also
    //tracked the argmax but only ever used its value, which equals bestQ.
    vector<pair<Actions, double> >& nextRow = QTable[state];
    double bestQ = nextRow[0].second;
    for(int i = 1; i < numActions; i++)
    {
        if(nextRow[i].second > bestQ)
        {
            bestQ = nextRow[i].second;
        }
    }

    //Update Q Table
    double learnedValue = alpha * (reward + gamma * bestQ - QTable[prevState][prevAction].second);
    QTable[prevState][prevAction].second += learnedValue;
}

/// Q-value update with an externally supplied bootstrap value:
///   Q(s,a) += alpha * (reward + gamma * nextReward - Q(s,a))
/// Used when the caller already knows the future-value estimate instead of
/// reading it from the Q-table. Silently returns if the previous state has
/// no initialised Q-row or prevAction is out of range.
/// @param prevState   state the action was taken in (s)
/// @param prevAction  action index taken in prevState (a)
/// @param reward      immediate reward observed for (s, a)
/// @param nextReward  caller-provided estimate of the future value
void Agent::updateReward(State* prevState, int prevAction, double reward, double nextReward)
{
    //Row must exist and be non-empty (count-first avoids operator[] inserting).
    if(QTable.count(prevState) == 0 || QTable[prevState].empty()) return;
    //Guard added: an out-of-range prevAction would index past the row.
    if(prevAction < 0 || prevAction >= (int)QTable[prevState].size()) return;

    //Fixed: removed the unused read `double z = QTable2[prevState];` -- besides
    //being dead code, operator[] silently default-inserted an entry into
    //QTable2 for states never seen by getAction.

    //Update Q Table
    double learnedValue = alpha * (reward + gamma * nextReward - QTable[prevState][prevAction].second);
    QTable[prevState][prevAction].second += learnedValue;
}


//Destructor: the agent owns no raw resources (the maps clean themselves up),
//so the compiler-generated destructor is sufficient. Note the State* keys are
//not owned here and are intentionally not deleted.
Agent::~Agent() = default;
