import copy
import math
import random

import torch
import torch.optim as optim

class DQN:
    """Deep Q-Network agent with a separate target network, epsilon-greedy
    exploration (exponential decay), and Double-DQN style updates."""

    def __init__(
            self,
            model,
            buffer,
            n_act: int,
            gamma: float,
            e_start: float,
            e_end: float,
            e_decay: float,
            batch_size: int,
            lr: float
        ):
        """Build the agent.

        Args:
            model: a torch.nn.Module mapping a state batch to Q-values of
                shape (batch, n_act). Used as the policy network; the target
                network is an independent deep copy of it.
            buffer: experience-replay buffer exposing `.num` (current size)
                and `.sample(batch_size)` returning a dict with keys
                'obs', 'act', 'reward', 'next_obs', 'ter', 'tru'.
            n_act: number of discrete actions.
            gamma: discount factor.
            e_start / e_end / e_decay: epsilon-greedy schedule parameters
                (exponential decay from e_start to e_end with rate e_decay).
            batch_size: minibatch size for `update()`.
            lr: Adam learning rate for the policy network.
        """
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.n_actions = n_act
        self.gamma = gamma
        self.sample_count = 0  # number of sample_action() calls; drives epsilon decay
        # epsilon-greedy exploration schedule
        self.epsilon = e_start
        self.epsilon_start = e_start
        self.epsilon_end = e_end
        self.epsilon_decay = e_decay
        self.batch_size = batch_size
        self.policy_net = model.to(self.device)
        # BUG FIX: the original assigned the *same* module object to both
        # policy_net and target_net, so the "target" network silently tracked
        # every gradient step of the policy network. A deep copy gives the
        # target network its own parameters.
        self.target_net = copy.deepcopy(model).to(self.device)
        # Start the target network exactly in sync with the policy network.
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=lr)
        self.buffer = buffer  # experience replay buffer

    def sample_action(self, state):
        """Epsilon-greedy action selection for training.

        Decays epsilon exponentially with the number of calls, then returns
        either the greedy action (prob. 1 - epsilon) or a uniformly random
        action (prob. epsilon).
        """
        self.sample_count += 1
        # exponential epsilon decay toward epsilon_end
        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
            math.exp(-1. * self.sample_count / self.epsilon_decay)
        if random.random() > self.epsilon:
            with torch.no_grad():
                state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(dim=0)
                q_values = self.policy_net(state)
                action = q_values.max(1)[1].item()
        else:
            action = random.randrange(self.n_actions)
        return action

    @torch.no_grad()
    def predict_action(self, state):
        """Greedy (evaluation-time) action: argmax over policy-net Q-values."""
        state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(dim=0)
        q_values = self.policy_net(state)
        action = q_values.max(1)[1].item()
        return action

    def update(self):
        """One gradient step on the policy network from a replay minibatch.

        No-op until the buffer holds at least `batch_size` transitions.
        Uses the Double-DQN target: the policy net selects the next action,
        the target net evaluates it. Terminal (`ter`) and truncated (`tru`)
        transitions do not bootstrap.
        """
        if self.buffer.num < self.batch_size:
            return  # not enough experience collected yet
        batch = self.buffer.sample(self.batch_size)
        # BUG FIX: the original split this unpacking across two statements,
        # leaving `state, act, reward, next_state` as a bare expression over
        # undefined names (NameError at runtime).
        state, act, reward, next_state, ter, tru = (
            batch['obs'], batch['act'], batch['reward'],
            batch['next_obs'], batch['ter'], batch['tru'])
        state = state.to(self.device)
        act = act.to(self.device)
        reward = reward.to(self.device)
        next_state = next_state.to(self.device)
        ter = ter.to(self.device)
        tru = tru.to(self.device)
        # Q(s_t, a_t) for the actions actually taken; `act` is expected to be
        # shape (batch, 1) so gather picks one Q-value per row.
        q_values = self.policy_net(state).gather(dim=1, index=act.long())
        # Double DQN target: no gradients should flow through it, and the
        # target network is never optimized here.
        with torch.no_grad():
            next_q_values = self.policy_net(next_state)
            next_target_value = self.target_net(next_state)
            next_target_q_value = next_target_value.gather(
                dim=1, index=torch.max(next_q_values, 1)[1].unsqueeze(1).long())
            # zero out bootstrapping for terminal/truncated transitions
            expected_q_values = reward.unsqueeze(1) + self.gamma * next_target_q_value \
                * (1 - ter.unsqueeze(1)) * (1 - tru.unsqueeze(1))
        loss = torch.nn.MSELoss()(q_values, expected_q_values)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
