import numpy as np
import paddle
import parl


class DimOneAgent(parl.Agent):
    """DQN agent with epsilon-greedy exploration for a discrete action space."""

    def __init__(self, algorithm, obs_dim, act_dim, e_greed=0.1, e_greed_decrement=1e-6, update_target_steps=200):
        """
        :param algorithm: DQN algorithm instance (provides predict/learn/sync_target)
        :param obs_dim: dimensionality of the observation space
        :param act_dim: number of discrete actions
        :param e_greed: initial exploration probability (epsilon)
        :param e_greed_decrement: amount epsilon decays after each sample() call
        :param update_target_steps: sync target network every this many learn() calls
        """
        super(DimOneAgent, self).__init__(algorithm)
        self.update_target_steps = update_target_steps
        self.global_step = 0  # counts learn() calls, drives target-network sync
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.e_greed = e_greed
        self.e_greed_decrement = e_greed_decrement

    def sample(self, obs):
        """Epsilon-greedy action selection with decaying exploration.

        :param obs: current observation
        :return: chosen action index
        """
        # With probability e_greed pick a uniformly random action (explore);
        # otherwise take the greedy action from the Q-network (exploit).
        if np.random.rand() < self.e_greed:
            act = np.random.randint(self.act_dim)
        else:
            act = self.predict(obs)
        # Decay epsilon after every sample, clamped so it never goes below 0.
        self.e_greed = max(0, self.e_greed - self.e_greed_decrement)
        return act

    def predict(self, obs):
        """Greedy action: argmax over the Q-values predicted for obs.

        :param obs: current observation
        :return: index of the action with the highest predicted Q-value
        """
        obs = paddle.to_tensor(obs, dtype='float32')  # convert obs to a tensor
        pred_q = self.alg.predict(obs)  # Q-values from the online network
        act = pred_q.argmax().numpy()[0]  # index of the max Q == greedy action
        return act

    def learn(self, obs, act, reward, next_obs, terminal):
        """Run one DQN training step and periodically sync the target network.

        :return: scalar training loss for this batch
        """
        # Sync model -> target_model every update_target_steps training steps
        # (also fires on the very first call, initializing the target network).
        if self.global_step % self.update_target_steps == 0:
            self.alg.sync_target()
        self.global_step += 1

        # Add a trailing axis so act/reward/terminal become 2-D (batch, 1),
        # matching the shapes the algorithm expects.
        act = np.expand_dims(act, -1)
        reward = np.expand_dims(reward, -1)
        terminal = np.expand_dims(terminal, -1)

        obs = paddle.to_tensor(obs, dtype='float32')
        act = paddle.to_tensor(act, dtype='int32')
        reward = paddle.to_tensor(reward, dtype='float32')
        next_obs = paddle.to_tensor(next_obs, dtype='float32')
        terminal = paddle.to_tensor(terminal, dtype='float32')
        loss = self.alg.learn(obs, act, reward, next_obs, terminal)  # update model parameters
        return loss.numpy()[0]
