import torch
import numpy as np


class Agent(object):
    """Bridges numpy-based environment data and a torch-based RL algorithm.

    Converts observations/transitions to float32 tensors on ``self.device``,
    delegates prediction and training to ``algorithm``, and keeps the
    algorithm's target network in sync (hard sync once at construction,
    soft sync after every training step).
    """

    def __init__(self, algorithm, obs_dim, act_dim, device='cpu'):
        """
        Args:
            algorithm: object exposing ``predict``, ``learn`` and
                ``sync_target`` (e.g. a DDPG/DQN-style algorithm wrapper).
            obs_dim (int): observation dimensionality.
            act_dim (int): action dimensionality.
            device (str): torch device for all tensors, e.g. 'cpu' or 'cuda'.
        """
        assert isinstance(obs_dim, int)
        assert isinstance(act_dim, int)
        self.alg = algorithm
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.device = device

        # Hard-copy the online model's weights into the target model once,
        # before any training, so both networks start identical.
        self.alg.sync_target(soft_sync=False)

    def _to_tensor(self, data):
        # Route through np.asarray so list inputs take the fast conversion
        # path (avoids the slow/warned list-of-ndarray path of the legacy
        # torch.FloatTensor constructor); torch.tensor always copies, which
        # matches the original FloatTensor behavior.
        return torch.tensor(np.asarray(data), dtype=torch.float32,
                            device=self.device)

    def predict(self, obs):
        """Compute the action for a single observation.

        Returns a numpy array with a leading batch dimension of 1,
        matching the original ``torch.FloatTensor([obs])`` batching.
        """
        obs = self._to_tensor(obs).unsqueeze(0)  # add batch dimension
        act = self.alg.predict(obs)
        return act.detach().cpu().numpy()

    def learn(self, obs, action, reward, next_obs, terminal):
        """Run one training step on a batch of transitions.

        Args:
            obs, action, reward, next_obs, terminal: batched transition
                arrays/lists; all are converted to float32 tensors.

        Returns:
            The critic loss reported by the algorithm.
        """
        obs = self._to_tensor(obs)
        action = self._to_tensor(action)
        reward = self._to_tensor(reward)
        next_obs = self._to_tensor(next_obs)
        terminal = self._to_tensor(terminal)
        # One gradient step on the algorithm's networks.
        _, critic_loss = self.alg.learn(obs, action, reward, next_obs, terminal)
        # Soft (Polyak) update of the target Q network after each step.
        self.alg.sync_target(soft_sync=True)
        return critic_loss
