import numpy as np
import gym
import matplotlib.pyplot as plt
from matplotlib import animation
import random
import time

def test_env():
    """Run one episode of CartPole with uniformly random actions, printing each state.

    Collects rendered RGB frames along the way (currently unused by callers).
    """
    env = gym.make('CartPole-v0', render_mode='rgb_array')
    # BUG FIX: reset() returns (observation, info) in the gym>=0.26 API;
    # the original bound the whole tuple to `state`.
    state, _ = env.reset()
    steps = 0
    frames = []
    while True:
        frames.append(env.render())
        action = env.action_space.sample()
        # BUG FIX: step() returns (obs, reward, terminated, truncated, info);
        # the original bound `done` to terminated only and mislabelled the
        # truncated flag as `info`, so time-limit truncation was ignored.
        state, reward, terminated, truncated, info = env.step(action)
        steps += 1
        print(f'step: {steps}, state: {state}')
        time.sleep(1)   # slow down so the printed trace is readable
        if terminated or truncated:
            break
    env.close()  # release the render window / resources

def display_frames_as_gif(frames, output):
    """Save a list of RGB frames (H x W x 3 uint8 arrays) as an animated GIF.

    Args:
        frames: non-empty list of images as returned by env.render().
        output: destination file path for the GIF.
    """
    # Size the figure so one frame pixel maps to one canvas pixel at dpi=72.
    fig = plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi=72)
    patch = plt.imshow(frames[0])
    plt.axis('off')

    def animate(i):
        patch.set_data(frames[i])
        # BUG FIX: set_data() returns None; FuncAnimation expects an iterable
        # of the artists that changed.
        return (patch,)

    # Use the figure handle we already hold instead of plt.gcf().
    anim = animation.FuncAnimation(fig, animate, frames=len(frames), interval=50)
    anim.save(output)
    plt.close(fig)  # avoid leaking the figure when called repeatedly
    
#display_frames_as_gif(frames,output='E:/chartGPT_app/Reinforcement_learning/rand_cartpole.gif')
 
class Agent:
    """Tabular Q-learning agent for CartPole over a discretized state space.

    The 4 continuous observation variables are each digitized into
    NUM_DIGITIZED bins, giving NUM_DIGITIZED**4 discrete states.
    """

    def __init__(self, action_space, n_states, eta=0.5, gamma=0.99, NUM_DIGITIZED=6):
        """
        Args:
            action_space: gym discrete action space (provides .n and .sample()).
            n_states: number of continuous observation variables (4 for CartPole).
            eta: learning rate.
            gamma: discount factor.
            NUM_DIGITIZED: bins per observation variable.
        """
        # BUG FIX: eta was hard-coded to 0.5, silently ignoring the parameter.
        self.eta = eta
        self.gamma = gamma
        self.action_space = action_space
        self.NUM_DIGITIZED = NUM_DIGITIZED
        # One row per discretized state, one column per action; random init in [0, 1).
        self.q_table = np.random.uniform(
            0, 1, size=(NUM_DIGITIZED ** n_states, self.action_space.n))

    @staticmethod
    def bins(clip_min, clip_max, num_bins):
        """Return the num_bins - 1 interior edges splitting [clip_min, clip_max] into equal bins."""
        return np.linspace(clip_min, clip_max, num_bins + 1)[1:-1]

    @staticmethod
    def digitize_state(observation, NUM_DIGITIZED):
        """Map a continuous CartPole observation to a single flat table index."""
        pos, cart_v, angle, pole_v = observation
        digitized = [
            np.digitize(pos, bins=Agent.bins(-2.4, 2.4, NUM_DIGITIZED)),
            np.digitize(cart_v, bins=Agent.bins(-3, 3, NUM_DIGITIZED)),
            # BUG FIX: the upper clip was -0.418 (a degenerate range where every
            # edge collapses to -0.418); the pole-angle range is [-0.418, 0.418].
            np.digitize(angle, bins=Agent.bins(-0.418, 0.418, NUM_DIGITIZED)),
            np.digitize(pole_v, bins=Agent.bins(-2, 2, NUM_DIGITIZED)),
        ]
        # Interpret the four digit values as a base-NUM_DIGITIZED number.
        return sum(d * (NUM_DIGITIZED ** i) for i, d in enumerate(digitized))

    def q_learning(self, obs, action, reward, obs_next):
        """One Q-learning update: Q(s,a) += eta * (r + gamma * max_a' Q(s',a') - Q(s,a))."""
        obs_ind = Agent.digitize_state(obs, self.NUM_DIGITIZED)
        obs_next_ind = Agent.digitize_state(obs_next, self.NUM_DIGITIZED)
        # BUG FIX: the discount factor gamma was stored but never applied to
        # the bootstrapped value in the TD target.
        td_target = reward + self.gamma * np.max(self.q_table[obs_next_ind, :])
        self.q_table[obs_ind, action] += self.eta * (td_target - self.q_table[obs_ind, action])

    def choose_action(self, state, episode):
        """Epsilon-greedy action selection; epsilon decays as 0.5 / (episode + 1)."""
        eps = 0.5 / (episode + 1)
        state_ind = Agent.digitize_state(state, self.NUM_DIGITIZED)
        if random.random() < eps:
            return self.action_space.sample()  # explore
        return int(np.argmax(self.q_table[state_ind, :]))  # exploit
                
def test_q_learning():
    """Train a tabular Q-learning agent on CartPole; once it balances for
    >= 150 steps in 10 consecutive episodes, record one final episode and
    save it as a GIF."""
    env = gym.make('CartPole-v0', render_mode='rgb_array')
    env.reset()
    action_space = env.action_space
    n_states = env.observation_space.shape[0]  # 4 continuous state variables
    agent = Agent(action_space, n_states)
    max_episodes = 1000
    max_steps = 200

    continue_success_episodes = 0  # consecutive episodes surviving >= 150 steps
    learning_finish_flag = False   # True -> record one last episode for the GIF

    frames = []
    for episode in range(max_episodes):
        obs = env.reset()[0]  # reset() returns (observation, info)
        for step in range(max_steps):
            if learning_finish_flag:
                frames.append(env.render())
            action = agent.choose_action(obs, episode)
            # BUG FIX: step() returns (obs, reward, terminated, truncated, info);
            # the original treated the truncated flag as `info` and ignored
            # time-limit truncation when deciding the episode was over.
            obs_next, _, terminated, truncated, _ = env.step(action)
            done = terminated or truncated
            # Shaped reward: only the terminal step carries a learning signal.
            if done:
                if step < 150:
                    reward = -1  # fell over early: penalize
                    continue_success_episodes = 0
                else:
                    reward = 1   # balanced long enough: reward
                    continue_success_episodes += 1
            else:
                reward = 0
            agent.q_learning(obs, action, reward, obs_next)

            if done:
                print(f'episode :{episode}, finish: {step} time')
                break
            obs = obs_next
        if learning_finish_flag:
            break
        if continue_success_episodes >= 10:
            learning_finish_flag = True
            print(f'continue: success(step>150) more than 10 times')

    env.close()  # release env resources before (slow) GIF encoding
    if len(frames) > 0:
        display_frames_as_gif(frames, output='E:/chartGPT_app/Reinforcement_learning/cart_pole_q_learrning.gif')



# Guard the script entry point so importing this module does not start training.
if __name__ == "__main__":
    test_q_learning()

# env = gym.make('CartPole-v0',render_mode='rgb_array')  
# env.reset()
# obs = env.reset()
# print(obs)



