import gym
import numpy as np
import matplotlib.pyplot as plt


# Hyper-parameters.
g_eps = 0.1           # initial exploration rate for the eps-greedy behaviour policy
lr = 0.1              # TD learning rate
num_episodes = 100000 # training episodes per method
gamma = 0.9           # discount factor
test_cnt = 5000       # greedy-policy evaluation episodes
# Running win-rate buffer, one slot per evaluation episode (sized from
# test_cnt instead of repeating the magic constant 5000).
win_arr = np.zeros(test_cnt)
env = gym.make('Blackjack-v1', natural=False, sab=False).unwrapped

def generate_episode(q_table, eps):
    """Play one Blackjack episode following an eps-greedy policy over q_table.

    Parameters
    ----------
    q_table : dict mapping (state, action) -> estimated action value.
    eps : float, probability assigned to the non-greedy action.

    Returns
    -------
    list of (state, action, reward) tuples, one per time step; reward is the
    reward received after taking that action.
    """
    state = env.reset()
    episode = []
    while True:
        # Look both action values up fresh each step; unseen pairs default
        # to 0. (Bug fix: q0/q1 were previously initialized once outside
        # the loop, so a value from an earlier state leaked into the action
        # choice whenever the current pair was missing from q_table.)
        q0 = q_table.get((state, 0), 0)
        q1 = q_table.get((state, 1), 0)

        if q0 == q1:
            # Tie: choose uniformly at random.
            action = np.random.choice(np.arange(2))
        elif q0 < q1:
            action = np.random.choice(np.arange(2), p=np.array([eps, 1 - eps]))
        else:
            action = np.random.choice(np.arange(2), p=np.array([1 - eps, eps]))
        next_state, reward, done, _ = env.step(action)
        episode.append((state, action, reward))
        state = next_state
        if done:
            break

    return episode


def MC():
    """First-visit Monte Carlo control with a decaying eps-greedy policy.

    Returns
    -------
    dict mapping (state, action) -> mean first-visit return.
    """
    q_table = {}
    N = {}                  # first-visit count per (state, action)
    Sum_all_episodes = {}   # cumulative first-visit returns per (state, action)
    eps = g_eps
    for k in range(num_episodes):
        eps = eps - eps * (k / num_episodes)  # anneal exploration towards 0
        episode = generate_episode(q_table, eps)

        # Discounted return G_t for every time step, computed by a backward
        # scan. This sums ALL future rewards (bug fix: the old code only
        # discounted the final reward, which happens to coincide for
        # Blackjack's terminal-only rewards but is wrong in general).
        returns = [0.0] * len(episode)
        g = 0.0
        for i in range(len(episode) - 1, -1, -1):
            g = episode[i][2] + gamma * g
            returns[i] = g

        explored_sa_pair = set()
        for i, (state, action, _) in enumerate(episode):
            if (state, action) in explored_sa_pair:
                continue  # first-visit: only the earliest occurrence counts
            explored_sa_pair.add((state, action))

            Sum_all_episodes[(state, action)] = (
                Sum_all_episodes.get((state, action), 0.0) + returns[i]
            )
            N[(state, action)] = N.get((state, action), 0) + 1
            # Incremental sample mean of the first-visit returns.
            q_table[(state, action)] = (
                Sum_all_episodes[(state, action)] / N[(state, action)]
            )
    return q_table

def TD():
    """Tabular SARSA on sampled episodes with a decaying eps-greedy policy.

    For each transition (s_t, a_t, r_t) followed by (s_{t+1}, a_{t+1}):

        Q(s_t, a_t) += lr * (r_t + gamma * Q(s_{t+1}, a_{t+1}) - Q(s_t, a_t))

    with Q(terminal) = 0. (Bug fixes vs. the original: the TD error is now
    measured against the pair being updated instead of the next pair; the
    reward in the target is the one belonging to the updated transition;
    unseen pairs default to 0 instead of raising KeyError; and the first
    pair's value is no longer overwritten with the raw reward each episode.)

    Returns
    -------
    dict mapping (state, action) -> learned action value.
    """
    q_table = {}
    eps = g_eps
    for k in range(num_episodes):
        eps = eps - eps * (k / num_episodes)  # anneal exploration towards 0
        episode = generate_episode(q_table, eps)
        for i, (state, action, reward) in enumerate(episode):
            q_sa = q_table.get((state, action), 0.0)
            if i + 1 < len(episode):
                next_state, next_action, _ = episode[i + 1]
                td_target = reward + gamma * q_table.get((next_state, next_action), 0.0)
            else:
                td_target = reward  # terminal transition: no bootstrap term
            q_table[(state, action)] = q_sa + lr * (td_target - q_sa)

    return q_table

def test(q_table, win_arr):
    """Evaluate the greedy policy induced by q_table over test_cnt episodes.

    Parameters
    ----------
    q_table : dict mapping (state, action) -> action value.
    win_arr : 1-D array of length >= test_cnt; win_arr[i] is overwritten
        with the running win rate after episode i (side effect).

    Prints the final win rate. Ties between action values are broken
    uniformly at random.
    """
    win_cnt = 0
    for i in range(test_cnt):
        state = env.reset()
        while True:
            # Fresh lookups with a default of 0 each step. (Bug fix: q0/q1
            # were previously initialized once per episode, so a value from
            # an earlier state leaked in when the current pair was unseen.)
            q0 = q_table.get((state, 0), 0)
            q1 = q_table.get((state, 1), 0)

            if q0 == q1:
                action = np.random.choice(np.arange(2))
            elif q0 < q1:
                action = 1  # greedy: hit
            else:
                action = 0  # greedy: stick
            next_state, reward, done, _ = env.step(action)

            if reward == 1:
                win_cnt += 1

            state = next_state
            if done:
                break
        win_arr[i] = win_cnt / (i + 1)

    print(win_cnt / test_cnt)
    return

# Episode index axis for the running-win-rate plots, sized from test_cnt
# instead of a duplicated magic 5000.
x = np.arange(test_cnt)

# Train and evaluate Monte Carlo control.
policy = MC()
test(policy, win_arr)
fig, ax = plt.subplots()
ax.set_xlabel('MC')
ax.set_ylabel('Win rate')
ax.plot(x, win_arr)
plt.show()

# Train and evaluate the TD (SARSA) policy on a FRESH figure. (Bug fix:
# the original reused the already-shown MC axes for the labels while
# plt.plot drew on a new implicit figure, so the TD plot was unlabeled.)
policy = TD()
test(policy, win_arr)
fig, ax = plt.subplots()
ax.set_xlabel('TD')
ax.set_ylabel('Win rate')
ax.plot(x, win_arr)
plt.show()
