import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from controller.buffer import ReplayBuffer

class DeepQNetwork(nn.Module):
    """Two-hidden-layer MLP that maps a state vector to one Q-value per action.

    Owns its own Adam optimizer so the caller can train it directly.
    """

    def __init__(self, lr, state_dim, action_dim, fc1_dim, fc2_dim):
        super().__init__()

        self.fc1 = nn.Linear(state_dim, fc1_dim)
        self.fc2 = nn.Linear(fc1_dim, fc2_dim)
        self.q = nn.Linear(fc2_dim, action_dim)
        self.optimizer = optim.Adam(self.parameters(), lr=lr)

    def forward(self, state):
        """Return Q-values of shape (..., action_dim) for the given state(s)."""
        hidden = F.relu(self.fc1(state))
        hidden = F.relu(self.fc2(hidden))
        return self.q(hidden)

    def save_checkpoint(self, checkpoint_file):
        """Serialize the network weights to *checkpoint_file* (legacy format)."""
        torch.save(self.state_dict(), checkpoint_file, _use_new_zipfile_serialization=False)

    def load_checkpoint(self, checkpoint_file):
        """Restore network weights previously written by save_checkpoint."""
        self.load_state_dict(torch.load(checkpoint_file))


class Agent:
    """Deep Q-learning agent with epsilon-greedy exploration and a
    softly-updated (Polyak-averaged) target network.

    The agent holds an evaluation network (trained by gradient descent), a
    target network (tracked via soft updates), and a replay buffer.
    """

    def __init__(self, state_dim, action_dim, fc1_dim, fc2_dim, lr=0.0001,
                 gamma=0.99, tau=0.005, epsilon=1.0, eps_end=0.01, eps_dec=1e-4,
                 max_size=1000000, batch_size=256):
        self.tau = tau  # soft-update rate from eval network to target network
        self.gamma = gamma  # discount factor
        self.epsilon = epsilon  # initial probability of taking a random action
        self.eps_min = eps_end  # floor for the exploration probability
        self.eps_dec = eps_dec  # epsilon decrement applied after each learn() step
        self.batch_size = batch_size
        self.action_space = [i for i in range(action_dim)]

        self.q_eval = DeepQNetwork(lr=lr, state_dim=state_dim, action_dim=action_dim,
                                   fc1_dim=fc1_dim, fc2_dim=fc2_dim)
        self.q_target = DeepQNetwork(lr=lr, state_dim=state_dim, action_dim=action_dim,
                                     fc1_dim=fc1_dim, fc2_dim=fc2_dim)
        self.memory = ReplayBuffer(state_dim=state_dim, max_size=max_size, batch_size=batch_size)
        # tau=1.0 performs a hard copy so both networks start identical.
        self.update_network_parameters(tau=1.0)

    def update_network_parameters(self, tau=None):
        """Polyak-average eval-network weights into the target network.

        target <- tau * eval + (1 - tau) * target. With tau=1.0 this is a
        hard copy. Defaults to self.tau when tau is None.
        """
        if tau is None:
            tau = self.tau
        # no_grad: this is a pure parameter update; avoid building an
        # autograd graph for the mixing expression.
        with torch.no_grad():
            for target_param, eval_param in zip(self.q_target.parameters(), self.q_eval.parameters()):
                target_param.data.copy_(tau * eval_param + (1 - tau) * target_param)

    def remember(self, state, action, reward, state_, done):
        """Store one transition (s, a, r, s', done) in the replay buffer."""
        self.memory.store_transition(state, action, reward, state_, done)

    def choose_action(self, observation, isTrain=True):
        """Epsilon-greedy action selection; returns the action index as float.

        During training (isTrain=True) a random action is taken with
        probability epsilon; otherwise the greedy action is returned.
        """
        state = torch.tensor(observation, dtype=torch.float)
        # Inference only: no_grad avoids building an unnecessary autograd
        # graph on every action selection.
        with torch.no_grad():
            actions = self.q_eval(state)
        action = torch.argmax(actions).item()
        if (np.random.random() < self.epsilon) and isTrain:
            action = np.random.choice(self.action_space)
        return float(action)

    def learn(self):
        """One DQN gradient step on a sampled minibatch.

        Computes the TD target from the target network, minimizes the MSE
        TD error on the eval network, soft-updates the target network, and
        decays epsilon. No-op until the buffer holds a full batch.
        """
        if not self.memory.ready():
            return

        states, actions, rewards, next_states, terminals = self.memory.sample_buffer()
        batch_idx = np.arange(self.batch_size)

        states_tensor = torch.tensor(states, dtype=torch.float)
        rewards_tensor = torch.tensor(rewards, dtype=torch.float)
        next_states_tensor = torch.tensor(next_states, dtype=torch.float)
        terminals_tensor = torch.tensor(terminals)

        with torch.no_grad():
            q_ = self.q_target(next_states_tensor)  # target-network action values
            q_[terminals_tensor] = 0.0  # terminal next-states contribute no future value
            # TD target = immediate reward + gamma * max action value at next state
            target = rewards_tensor + self.gamma * torch.max(q_, dim=-1)[0]
        y = self.q_eval(states_tensor)
        q = y[batch_idx, actions]  # Q(s, a) predictions for the actions actually taken
        # target already has no grad history (computed under no_grad),
        # so no extra detach() is needed.
        loss = F.mse_loss(q, target)  # TD error
        self.q_eval.optimizer.zero_grad()
        loss.backward()
        self.q_eval.optimizer.step()
        self.update_network_parameters()
        # Linear epsilon decay, clamped at eps_min.
        self.epsilon = self.epsilon - self.eps_dec if self.epsilon > self.eps_min else self.eps_min

    def save_models(self):
        """Persist both networks' weights to fixed checkpoint files."""
        self.q_eval.save_checkpoint('DQN_q_eval.pth')
        self.q_target.save_checkpoint('DQN_Q_target.pth')
        print('Saving network successfully!')

    def load_models(self):
        """Restore both networks' weights from the fixed checkpoint files."""
        self.q_eval.load_checkpoint('DQN_q_eval.pth')
        self.q_target.load_checkpoint('DQN_Q_target.pth')
        print('Loading Q_target network successfully!')
