import numpy as np
import gym
import matplotlib.pyplot as plt 

# --- Static rendering of the 3x3 maze ---
# Cells are 1x1; states S0..S8 are numbered left-to-right, top-to-bottom,
# so cell centers sit at (col + 0.5, row + 0.5).
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
ax.set_xlim(0,3)
ax.set_ylim(0,3)

# Interior walls of the maze drawn as red segments
# (the outer border is the axes box itself).
plt.plot([2,3],[1,1],color='red',linewidth=2)
plt.plot([0,1],[1,1],color='red',linewidth=2)
plt.plot([1,1],[1,2],color='red',linewidth=2)
plt.plot([1,2],[2,2],color='red',linewidth=2)

# State labels, one per cell center.
plt.text(0.5,2.5,'S0',size=14,ha='center')
plt.text(1.5,2.5,'S1',size=14,ha='center')
plt.text(2.5,2.5,'S2',size=14,ha='center')

plt.text(0.5,1.5,'S3',size=14,ha='center')
plt.text(1.5,1.5,'S4',size=14,ha='center')
plt.text(2.5,1.5,'S5',size=14,ha='center')

plt.text(0.5,0.5,'S6',size=14,ha='center')
plt.text(1.5,0.5,'S7',size=14,ha='center')
plt.text(2.5,0.5,'S8',size=14,ha='center')

# Hide all ticks and tick labels -- the grid is purely pictorial.
plt.tick_params(axis='both',which='both',
bottom=False,top=False,right=False,left=False,
labelbottom=False,labelleft=False
)

# Green circle marking the agent at the start cell S0.
# NOTE(review): `line` is kept as a Line2D handle, presumably for later
# animation of the agent's position -- no such use is visible in this file.
line,=ax.plot([0.5],[2.5],marker='o',color='g',markersize=60)

class MazeEnv(gym.Env):
    """Minimal 3x3 maze environment: states 0..8, start at 0, goal at 8.

    Actions: 0=up, 1=right, 2=down, 3=left on the row-major 3x3 grid,
    so up/down shift the state index by -3/+3 and right/left by +1/-1.
    The environment does NOT validate moves against the maze walls;
    invalid actions are expected to be masked out by the caller
    (the Agent below does this with NaN entries in its theta table).
    """

    def __init__(self):
        super().__init__()  # initialize the gym.Env base class
        self.state = 0  # current cell index, starting at S0

    def reset(self):
        """Return to the start state S0 and return it (classic gym API)."""
        self.state = 0
        return self.state

    def step(self, action):
        """Apply `action` and return ``(state, reward, done, info)``.

        Reward is a constant 1 on every step; the training loop in this
        file ignores it (learning is driven by visit statistics alone).
        """
        if action == 0:      # up
            self.state -= 3
        elif action == 1:    # right
            self.state += 1
        elif action == 2:    # down
            self.state += 3
        elif action == 3:    # left
            self.state -= 1
        done = self.state == 8  # episode ends at the goal cell
        return self.state, 1, done, {}

# print(softmax_cvt_theta_0_to_pi(theta_0))  # stale debug call: this is now a method on Agent, not a free function

#plt.show()

class Agent:
    """Softmax policy-gradient agent for the 3x3 maze.

    ``theta_0`` holds one row per non-goal state (S0..S7) and one column
    per action (0=up, 1=right, 2=down, 3=left); ``np.nan`` marks actions
    blocked by a wall. The policy ``pi`` is a row-wise softmax of the
    learnable ``theta``, with NaN (blocked) entries mapped to probability 0.
    """

    def __init__(self):
        self.actions = list(range(4))  # 0=up, 1=right, 2=down, 3=left
        self.theta_0 = np.asarray([
                        [np.nan, 1, 1, np.nan],       # S0
                        [np.nan, 1, np.nan, 1],       # S1
                        [np.nan, np.nan, 1, 1],       # S2
                        [1, np.nan, np.nan, np.nan],  # S3
                        [np.nan, 1, 1, np.nan],       # S4
                        [1, np.nan, np.nan, 1],       # S5
                        [np.nan, 1, np.nan, np.nan],  # S6
                        [1, 1, np.nan, 1]             # S7
                    ])
        # Copy so learning can never mutate the reference wall mask theta_0,
        # which update_theta() reads to decide which entries are trainable.
        self.theta = self.theta_0.copy()
        self.beta = 0.1  # learning rate for the theta update
        self.pi = self.softmax_cvt_theta_0_to_pi()

    def cvt_theta_0_to_pi(self):
        """Convert theta to a policy by plain row normalization.

        Unused alternative to the softmax conversion, kept for comparison.
        """
        pi = self.theta / np.nansum(self.theta, axis=1, keepdims=True)
        return np.nan_to_num(pi)

    def softmax_cvt_theta_0_to_pi(self, beta=1.0):
        """Return the row-wise softmax of theta (``beta`` = inverse temperature).

        NaN (wall) entries become probability 0 via ``nan_to_num``.
        """
        exp_theta = np.exp(beta * self.theta)
        pi = exp_theta / np.nansum(exp_theta, axis=1, keepdims=True)
        return np.nan_to_num(pi)

    def update_theta(self, s_a_history):
        """REINFORCE-style update of theta from one completed episode.

        ``s_a_history`` is a list of ``[state, action]`` pairs; the final
        entry is the goal state with a NaN action, so the step count T
        excludes it. Returns the updated theta.
        """
        T = len(s_a_history) - 1  # number of actual steps taken
        m, n = self.theta.shape
        # Gradient estimate; zeros leave NaN cells of theta untouched
        # (nan + 0 == nan), so the wall mask is preserved.
        delta_theta = np.zeros((m, n))
        for i in range(m):
            for j in range(n):
                if not np.isnan(self.theta_0[i, j]):
                    # N_i: visits to state i; N_ij: times action j was taken in state i.
                    N_i = sum(1 for sa in s_a_history if sa[0] == i)
                    N_ij = sum(1 for sa in s_a_history if sa[0] == i and sa[1] == j)
                    delta_theta[i, j] = (N_ij - self.pi[i, j] * N_i) / T
        self.theta = self.theta + self.beta * delta_theta
        return self.theta

    def update_pi(self):
        """Recompute and return the policy from the current theta."""
        self.pi = self.softmax_cvt_theta_0_to_pi()
        return self.pi

    def choose_action(self, state):
        """Sample an action for ``state`` from the current stochastic policy."""
        return np.random.choice(self.actions, p=self.pi[state, :])

# Train the softmax policy until it stops changing between episodes.
stop_eps = 1e-4  # convergence threshold on the L1 change of the policy
env = MazeEnv()
agent = Agent()

while True:
    # Roll out one full episode from S0, recording [state, action] pairs;
    # the action slot of the newest entry is filled in once it is chosen,
    # so the terminal entry keeps a NaN action.
    state = env.reset()
    s_a_history = [[state, np.nan]]
    while True:
        action = agent.choose_action(state)
        s_a_history[-1][1] = action
        state, reward, done, _ = env.step(action)
        s_a_history.append([state, np.nan])
        if done or state == 8:
            break

    # Policy-gradient step, then measure how much the policy moved.
    agent.update_theta(s_a_history)
    pi = agent.pi.copy()
    agent.update_pi()

    delta = np.sum(np.abs(agent.pi - pi))
    print(len(s_a_history), delta)
    if delta < stop_eps:
        break

print(agent.pi)






