#!/usr/bin/env python
# coding: utf-8

# In[1]:


import numpy as np
from core import Agent
import gym
from puckworld import PuckWorldEnv
from mountain_car import MountainCarEnv
from utils import learning_curve
import random


# In[2]:


class LinearSarsaLambdaAgent(Agent):
    '''Q(lambda) agent with a linear action-value approximator.

    Only applicable when the state space is continuous and the action
    space is discrete.  The value function is Q(s, a) = w . phi(s, a),
    where phi(s, a) concatenates the normalized state, a one-hot action
    encoding, and a constant bias term.

    NOTE(review): despite the class name, the TD target bootstraps on
    max_a Q(s', a) (Watkins-style / Q-learning target) rather than on
    the next on-policy action, matching the original implementation.
    '''

    def __init__(self, env):
        super(LinearSarsaLambdaAgent, self).__init__(env)
        # Feature vector = [normalized state | one-hot action | bias].
        # If the state or action space is discrete, its feature count is
        # the number of distinct values.
        self.n_state = self.obs_space.low.size
        self.n_action = self.action_space.n
        self.state_range = self.obs_space.high - self.obs_space.low
        self.n_feature = self.n_state + self.n_action + 1  # +1 for bias
        self.w = np.zeros(self.n_feature)  # linear parameters

    def get_array(self, value, n=None):
        '''Convert an integer index into a one-hot vector of length n.

        Array-like values are assumed to already be feature vectors and
        are returned unchanged.
        '''
        if isinstance(value, (list, tuple, np.ndarray)):
            return value
        result = np.zeros(n)
        result[value] = 1
        return result

    def normalized_state(self, state):
        '''Scale each state dimension into [0, 1].

        Fix: the original divided by the range without first shifting by
        the lower bound, so any dimension with a non-zero low (e.g.
        MountainCar's negative position/velocity lows) was not actually
        normalized into [0, 1].
        '''
        state = self.get_array(state, self.n_state)
        return (state - self.obs_space.low) / self.state_range

    def get_features(self, state, action):
        '''Build phi(s, a): normalized state, one-hot action, bias term.'''
        state = self.normalized_state(state)
        action = self.get_array(action, self.n_action)
        return np.hstack([state, action, [1]])

    def get_Q(self, state, action):
        '''Linear action value: Q(s, a) = w . phi(s, a).'''
        return np.dot(self.w, self.get_features(state, action))

    def get_max_Q(self, state):
        '''Return max_a Q(state, a) over all discrete actions.'''
        return max(self.get_Q(state, a) for a in range(self.n_action))

    def greedy_policy(self, state):
        '''Pick a highest-Q action, breaking ties uniformly at random.'''
        q_values = [self.get_Q(state, a) for a in range(self.n_action)]
        max_q = max(q_values)
        best = [a for a, q in enumerate(q_values) if q == max_q]
        return random.choice(best)

    def epsilon_greedy_policy(self, state, epsilon=0.05):
        '''With probability epsilon act uniformly at random, else greedily.'''
        if random.random() < epsilon:
            return random.choice(range(self.n_action))
        return self.greedy_policy(state)

    def perform_policy(self, state, epsilon=0.05):
        return self.epsilon_greedy_policy(state, epsilon)

    def learning_method(self, lambda_=0.9, gamma=0.99, alpha=0.02,
                        epsilon=0.2, display=False):
        '''Run one episode of Q(lambda) learning with accumulating traces.

        Args:
            lambda_: trace-decay rate.
            gamma:   discount factor.
            alpha:   learning rate.
            epsilon: exploration rate for the behavior policy.
            display: render the environment and print episode stats.

        Returns:
            (time_in_episode, total_reward) for the episode.
        '''
        self.state = self.env.reset()
        if display:
            self.env.render()
        time_in_episode, total_reward = 0, 0
        is_done = False
        loss = 0
        E = np.zeros(self.n_feature)  # eligibility traces, one per feature
        while not is_done:
            s0 = self.state
            a0 = self.perform_policy(s0, epsilon)
            s1, r1, is_done, info, total_reward = self.act(a0)
            if display:
                self.env.render()

            old_Q = self.get_Q(s0, a0)
            td_target = r1
            if not is_done:
                # Off-policy (Watkins-style) target: bootstrap on the
                # greedy value of the successor state.
                td_target += gamma * self.get_max_Q(s1)
            td_error = td_target - old_Q

            # Fix: accumulate the *feature* vector into the trace BEFORE
            # the weight update.  The original did `E[a0] += 1` (an
            # action index into a feature-sized vector, corrupting an
            # unrelated entry) and applied a one-step-stale trace.
            gradient = self.get_features(s0, a0)
            E = gamma * lambda_ * E + gradient
            self.w += alpha * td_error * E

            loss += td_error * td_error
            time_in_episode += 1
        loss /= 2 * time_in_episode  # mean squared TD error / 2
        if display:
            print("epsilon:{:3.2f},loss:{:3.2f},{}".format(epsilon, loss, self.experience.last_episode))
        return time_in_episode, total_reward


# In[3]:


# Build the MountainCar environment and wrap it with the linear agent.
env = MountainCarEnv()
env.reset()
agent = LinearSarsaLambdaAgent(env)


# In[4]:


# Sanity-check state normalization on a freshly reset environment.
# NOTE: the bare expressions below only display output in a notebook;
# they are no-ops when this file is run as a plain script.
state = env.reset()
agent.normalized_state(state)
agent.state_range


# In[5]:


# Train for 200 episodes with an epsilon schedule that decays from 1
# toward min_epsilon.  (Scheduling arguments are interpreted by
# Agent.learning in core — not shown here; verify there.)
data = agent.learning(gamma=0.999,          # discount factor
               epsilon = 1,                 # initial exploration rate
               decaying_epsilon = True,
               alpha = 1e-4,                # learning rate
               max_episode_num = 200, 
               min_epsilon = 1e-3,
               min_epsilon_ratio = 0.8,
               display = False)


# In[ ]:


# Inspect the learned linear weight vector.
print(agent.w)


# In[ ]:


# Plot training progress from the recorded episode data; the integer
# arguments presumably select data columns — see utils.learning_curve.
learning_curve(data, 2, 1)


# In[ ]:


# Continue training with a fixed (near-greedy) epsilon and a smaller
# learning rate, rendering each episode.
data = agent.learning(gamma=0.99,          # discount factor
               epsilon = 0.001,            # fixed exploration rate
               decaying_epsilon = False,
               alpha = 1e-5,               # learning rate
               max_episode_num = 100, 
               display = True)


# In[ ]:




