# from tqdm import tqdm
import numpy as np
import torch
import collections
import random
import time
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt

class ReplayBuffer:
    """FIFO experience replay buffer for off-policy RL algorithms."""

    def __init__(self, capacity):
        # deque evicts the oldest transition automatically once full
        self.buffer = collections.deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        """Store a single (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly sample a mini-batch of stored transitions.

        Returns states/actions/next_states as numpy arrays; rewards and
        dones stay as plain tuples (matching how callers build tensors).
        """
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        return np.array(states), np.array(actions), rewards, np.array(next_states), dones

    def size(self):
        """Number of transitions currently stored."""
        return len(self.buffer)

class ReplayBuffer_on:
    """Buffer for on-policy data: returns everything in insertion order.

    Transitions added earlier sit at lower indices (oldest first).
    """

    def __init__(self, capacity):
        # bounded deque: oldest entries are dropped once capacity is hit
        self.buffer = collections.deque(maxlen=capacity)

    def add(self, state, action, reward, next_state):
        """Append one (s, a, r, s') transition."""
        self.buffer.append((state, action, reward, next_state))

    def obtain_all(self):
        """Return every stored transition, unshuffled, column-wise.

        States/actions/next_states come back as numpy arrays; rewards
        stay a tuple.
        """
        states, actions, rewards, next_states = zip(*self.buffer)
        return np.array(states), np.array(actions), rewards, np.array(next_states)

    def size(self):
        """Number of transitions currently stored."""
        return len(self.buffer)
    
def compute_advantage(gamma, lmbda, td_delta):
    """Compute Generalized Advantage Estimation (GAE) from TD residuals.

    Args:
        gamma: discount factor.
        lmbda: GAE smoothing parameter (lambda).
        td_delta: 1-D torch tensor of TD residuals delta_t for one trajectory.

    Returns:
        torch.float tensor of advantages, same length as td_delta.
    """
    # .cpu() before .numpy(): Tensor.numpy() raises on CUDA tensors,
    # and is a no-op for tensors already on the CPU.
    td_delta = td_delta.detach().cpu().numpy()
    advantage_list = []
    advantage = 0.0
    # Backward recursion: A_t = delta_t + gamma * lambda * A_{t+1}
    for delta in td_delta[::-1]:
        advantage = gamma * lmbda * advantage + delta
        advantage_list.append(advantage)
    advantage_list.reverse()
    return torch.tensor(np.array(advantage_list), dtype=torch.float)

        
def moving_average(a, window_size):
    """Centered moving average whose output has the same length as `a`.

    The interior uses a full `window_size` window (via a cumulative sum);
    the edges average over growing odd-sized windows (1, 3, 5, ...) so no
    points are dropped. Assumes `window_size` is odd and len(a) >= window_size.
    """
    padded_cumsum = np.cumsum(np.insert(a, 0, 0))
    core = (padded_cumsum[window_size:] - padded_cumsum[:-window_size]) / window_size
    odd_counts = np.arange(1, window_size - 1, 2)
    head = np.cumsum(a[:window_size - 1])[::2] / odd_counts
    tail = (np.cumsum(a[:-window_size:-1])[::2] / odd_counts)[::-1]
    return np.concatenate((head, core, tail))


def smooth(data, weight=0.9):
    """Exponentially smooth a curve, like TensorBoard's smoothing slider.

    Args:
        data (List): raw input values.
        weight (Float): smoothing factor in [0, 1]; higher means smoother.
            0.9 is a common choice.

    Returns:
        List: smoothed values, same length as `data`.
    """
    smoothed = []
    running = data[0]  # seed with the first data point
    for value in data:
        # exponential moving average update
        running = running * weight + (1 - weight) * value
        smoothed.append(running)
    return smoothed

## State normalization
class Nomalize:
    """Running z-score normalization of states (Welford's online algorithm).

    Keeps a running mean/std over observed states and returns each incoming
    state normalized and clipped to [-5, 5]. (Class name keeps the original
    misspelled identifier for backward compatibility.)
    """

    def __init__(self, N_S):
        # running mean / std over N_S state dimensions
        self.mean = np.zeros((N_S,))
        self.std = np.zeros((N_S,))
        # accumulated sum of squared deviations (Welford's M2 term)
        self.stdd = np.zeros((N_S,))
        self.n = 0  # number of samples seen so far

    def __call__(self, x):
        x = np.asarray(x)
        self.n += 1
        if self.n == 1:
            self.mean = x
            # std is undefined for one sample; mirroring the mean makes the
            # first normalized output all zeros (x - mean == 0)
            self.std = self.mean
        else:
            prev_mean = self.mean.copy()
            # Welford's incremental mean / variance update
            self.mean = prev_mean + (x - prev_mean) / self.n
            self.stdd = self.stdd + (x - prev_mean) * (x - self.mean)
            self.std = np.sqrt(self.stdd / (self.n - 1))
        # z-score, then clip extreme values
        normalized = (x - self.mean) / (self.std + 1e-8)
        return np.clip(normalized, -5, +5)
    
def train_on_policy_agent0125(env, agent, num_episodes):
    """On-policy training loop with running state normalization.

    Collects one full episode at a time (states normalized by a shared
    `Nomalize` instance), updates the agent once per episode, and returns
    the list of per-episode returns.
    """
    nomalize0125 = Nomalize(env.observation_space.shape[0])  # state normalizer
    return_list = []
    time_start = time.perf_counter()  # wall-clock reference for progress logs
    for i_episode in range(int(num_episodes)):
        transition_dict = {'states': [], 'actions': [], 'next_states': [],
                           'rewards': [], 'dones': []}
        episode_return = 0
        state = nomalize0125(env.reset())  # normalize the initial observation
        done = False
        while not done:
            action = agent.take_action(state)
            next_state, reward, done, _ = env.step(action)
            next_state = nomalize0125(next_state)
            for key, value in zip(('states', 'actions', 'next_states', 'rewards', 'dones'),
                                  (state, action, next_state, reward, done)):
                transition_dict[key].append(value)
            state = next_state
            episode_return += reward
        return_list.append(episode_return)
        agent.update(transition_dict)  # one update per finished episode (on-policy)
        if (i_episode + 1) % 100 == 0:
            print(f"episodes: {i_episode+1}/{num_episodes}, rewards: {episode_return:.2f}, time consumption: {(time.perf_counter() - time_start):.2f}(s)")
    return return_list


def train_on_policy_agent(env, agent, num_episodes):
    """Basic on-policy training loop: collect one episode, then update once.

    Returns the list of per-episode returns.
    """
    return_list = []
    time_start = time.perf_counter()  # wall-clock reference for progress logs
    for i_episode in range(int(num_episodes)):
        # holds exactly one episode's worth of transitions
        transition_dict = {'states': [], 'actions': [], 'next_states': [],
                           'rewards': [], 'dones': []}
        episode_return = 0
        state = env.reset()
        done = False
        while not done:
            action = agent.take_action(state)
            next_state, reward, done, _ = env.step(action)
            for key, value in (('states', state), ('actions', action),
                               ('next_states', next_state), ('rewards', reward),
                               ('dones', done)):
                transition_dict[key].append(value)
            episode_return += reward
            state = next_state
        return_list.append(episode_return)
        agent.update(transition_dict)  # one update per finished episode (on-policy)
        if (i_episode + 1) % 100 == 0:
            print(f"{i_episode+1}/{num_episodes}, time: {(time.perf_counter() - time_start):.2f}(s), rewards: {episode_return:.2f}")
    return return_list

def get_action_real(action, action_low, action_high):
    """Affinely rescale a normalized action into [action_low, action_high].

    Maps -1 to action_low and +1 to action_high; presumably `action` lies
    in [-1, 1] (tanh-squashed policy output) — confirm against the caller.
    """
    half_range = (action_high - action_low) / 2
    midpoint = (action_high + action_low) / 2
    return half_range * action + midpoint

def train_off_policy_agent(env, agent, num_episodes, replay_buffer, minimal_size, batch_size, action_low, action_high):
    """Generic off-policy training loop (replay buffer + per-step updates).

    The agent's (normalized) action is rescaled with `get_action_real`
    before being sent to the environment; the normalized action is what
    gets stored in the replay buffer. Returns per-episode returns.
    """
    return_list = []
    time_start = time.perf_counter()  # wall-clock reference for progress logs
    for i_episode in range(int(num_episodes)):
        episode_return = 0
        state = env.reset()
        done = False
        while not done:
            # action = agent.take_action_noise(state) # DDPG
            action = agent.take_action(state) # SAC
            next_state, reward, done, _ = env.step(
                get_action_real(action, action_low, action_high))
            replay_buffer.add(state, action, reward, next_state, done)
            episode_return += reward
            state = next_state
            # start learning once the buffer holds enough transitions
            if replay_buffer.size() > minimal_size:
                b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                agent.update({'states': b_s, 'actions': b_a, 'next_states': b_ns,
                              'rewards': b_r, 'dones': b_d})
        return_list.append(episode_return)
        if (i_episode + 1) % 10 == 0:
            print(f"{i_episode+1}/{num_episodes}, time: {(time.perf_counter() - time_start):.2f}(s), rewards: {episode_return:.2f}")
    return return_list


def dis_to_con(action_discrete, env, action_dim):
    """Map a discrete action index back to a continuous action value.

    Index 0 maps to the action-space lower bound, index `action_dim - 1`
    to the upper bound, with indices in between spaced uniformly.
    """
    low = env.action_space.low[0]    # continuous action lower bound
    high = env.action_space.high[0]  # continuous action upper bound
    fraction = action_discrete / (action_dim - 1)
    return low + fraction * (high - low)
                            
def train_DQN(agent, env, num_episodes, replay_buffer, minimal_size, batch_size):
    """Train a DQN agent on a continuous-action env via action discretization.

    Returns:
        (return_list, max_q_value_list): per-episode returns (length
        num_episodes), and the smoothed max-Q trace recorded at every
        environment step (length = total steps across all episodes).
    """
    return_list = []
    max_q_value_list = []
    max_q_value = 0
    time_start = time.perf_counter()  # wall-clock reference for progress logs
    for i_episode in range(int(num_episodes)):
        episode_return = 0
        state = env.reset()
        done = False
        while not done:
            action = agent.take_action_epsilon_greedy(state)
            # exponentially smoothed max-Q estimate, recorded per step for plotting
            max_q_value = agent.max_q_value(state) * 0.005 + max_q_value * 0.995
            max_q_value_list.append(max_q_value)
            # discrete index -> continuous action the env expects
            action_continuous = dis_to_con(action, env, agent.action_dim)
            next_state, reward, done, _ = env.step([action_continuous])
            replay_buffer.add(state, action, reward, next_state, done)
            episode_return += reward
            state = next_state
            if replay_buffer.size() > minimal_size:
                b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                agent.update({'states': b_s, 'actions': b_a, 'next_states': b_ns,
                              'rewards': b_r, 'dones': b_d})
        return_list.append(episode_return)
        if (i_episode + 1) % 10 == 0:
            print(f"episodes: {i_episode+1}/{num_episodes}, rewards: {episode_return:.2f}, time consumption: {(time.perf_counter() - time_start):.2f}(s)")
    return return_list, max_q_value_list



def test_agent(env, agent, num_episodes):
    """Evaluate the agent for `num_episodes` episodes without learning.

    Returns the list of per-episode returns.
    """
    return_list = []
    for _ in range(int(num_episodes)):
        total_reward = 0
        obs = env.reset()
        finished = False
        while not finished:
            obs, reward, finished, _ = env.step(agent.take_action(obs))
            total_reward += reward
        return_list.append(total_reward)
    return return_list


        

def test_agent_render(env, agent):
    """Run one rendered episode with the agent's policy, then close the env."""
    state = env.reset()
    while True:
        env.render()  # draw the current frame
        # agent picks the action; one interaction step with the environment
        next_state, reward, done, _ = env.step(agent.take_action(state))
        state = next_state
        if done:
            print('done')
            break
        time.sleep(0.06)  # slow the loop down so the render is watchable
    env.close()

def plot_results(return_list, env_name, alg_name, string_train_test, moving_average_weight):
    """Plot per-episode rewards together with their moving average.

    `moving_average_weight` is the window size passed to `moving_average`.
    """
    plt.figure()
    episodes = list(range(len(return_list)))
    plt.plot(episodes, return_list, label='rewards')
    smoothed_returns = moving_average(return_list, moving_average_weight)
    plt.plot(list(range(len(smoothed_returns))), smoothed_returns, label='smoothed')
    plt.xlabel('Episodes')
    plt.ylabel('Rewards')
    plt.title(f'{alg_name} on {env_name} ({string_train_test})')
    plt.legend(loc='best')
    plt.grid()
    plt.show()