import parl
import paddle
import copy
class DQN(parl.Algorithm):
    """DQN algorithm: a prediction network plus a periodically-synced target network.

    The target network is a deep copy of the prediction network and implements
    the fixed-Q-target trick: it supplies stable Q-value targets while the
    prediction network is being trained.
    """

    def __init__(self, model, act_dim=None, gamma=None, lr=None):
        """
        Args:
            model: Q-network mapping observations to per-action Q values.
            act_dim (int): number of discrete actions.
            gamma (float): discount factor for future rewards.
            lr (float): learning rate for the Adam optimizer.
        """
        self.model = model
        # Deep-copied target network (fixed Q-target): decouples the target
        # Q values from the network currently being updated.
        self.target_model = copy.deepcopy(model)

        # Validate hyper-parameter types early; the None defaults exist only
        # for signature flexibility — the values are in fact required.
        assert isinstance(act_dim, int)
        assert isinstance(gamma, float)
        assert isinstance(lr, float)

        self.act_dim = act_dim
        self.gamma = gamma
        self.lr = lr
        # Only the prediction model's parameters are optimized; the target
        # model is updated exclusively through sync_target().
        self.optimizer = paddle.optimizer.Adam(
            learning_rate=self.lr, parameters=self.model.parameters())

    def predict(self, obs):
        """Return the Q values predicted by the current model for obs."""
        return self.model.forward(obs)

    def learn(self, obs, action, reward, next_obs, terminal):
        """Run one DQN update step and return the scalar training loss.

        Args:
            obs: batch of observations S_t.
            action: batch of integer actions A_t.
            reward: batch of rewards R_t.
            next_obs: batch of next observations S_{t+1}.
            terminal: batch of episode-done flags (truthy/1 stops bootstrapping).

        Returns:
            The mean squared TD error of the batch (0-d tensor), useful for
            logging. Previously this value was computed but discarded.
        """
        # max_a' Q_target(s', a'): row-wise max over the action axis, one
        # value per sample in the batch.
        next_pred_value = self.target_model.forward(next_obs)
        best_v = paddle.max(next_pred_value, axis=-1)
        # Freeze the target branch so no gradients flow into target_model.
        best_v.stop_gradient = True
        terminal = paddle.cast(terminal, dtype='float32')
        # TD target: r + gamma * max Q' for non-terminal transitions;
        # terminal transitions use the reward alone.
        target = reward + (1.0 - terminal) * self.gamma * best_v

        pred_value = self.model.forward(obs)  # Q(s, .) from the live model

        # Select Q(s, a) for the action actually taken via a one-hot mask,
        # e.g. action 3 -> [0, 0, 0, 1, 0]; the row-sum collapses the action
        # axis so the result matches target's shape.
        action_onehot = paddle.nn.functional.one_hot(action, self.act_dim)
        action_onehot = paddle.cast(action_onehot, dtype='float32')
        pred_action_value = paddle.sum(
            paddle.multiply(action_onehot, pred_value), axis=1)

        # Regress Q(s, a) toward the TD target -> mean squared error loss.
        loss = paddle.nn.functional.square_error_cost(pred_action_value, target)
        cost = paddle.mean(loss)
        cost.backward()  # back-propagate
        self.optimizer.step()  # apply the gradient update
        self.optimizer.clear_grad()  # reset gradients for the next step
        return cost

    def sync_target(self):
        """Copy the current model's weights into the target model."""
        self.target_model = copy.deepcopy(self.model)