import random
import gym
import numpy as np
import collections
from tqdm import tqdm
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import env


class ReplayBuffer:
    """Experience replay buffer: bounded FIFO of transition tuples."""

    def __init__(self, capacity):
        # deque with maxlen silently evicts the oldest transition when full
        self.buffer = collections.deque(maxlen=capacity)

    def add(self, state, node, vec, reward, next_state):
        """Store one transition as (state, node, vec, reward, next_state)."""
        self.buffer.append((state, node, vec, reward, next_state))

    def sample(self, batch_size):
        """Uniformly sample ``batch_size`` transitions.

        Returns ``(states, nodes, vecs, rewards, next_states)`` in the
        same field order used by :meth:`add`; states and rewards are
        stacked into numpy arrays.

        BUG FIX: the original unpacked the stored 5-tuples under the
        names ``(state, action, reward, next_state, done)``, which did
        not match what ``add`` stores and misled the training loop.
        """
        transitions = random.sample(self.buffer, batch_size)
        state, node, vec, reward, next_state = zip(*transitions)
        return np.array(state), node, vec, np.array(reward), next_state

    def size(self):
        """Number of transitions currently held in the buffer."""
        return len(self.buffer)


class QnetNode(torch.nn.Module):
    """Two-layer MLP head that maps a state vector to a softmax
    probability distribution over the ``node_num`` candidate nodes."""

    def __init__(self, state_dim, hidden_dim, node_num):
        super().__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, node_num)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        logits = self.fc2(hidden)
        # Normalize over the node dimension so each row sums to 1
        return F.softmax(logits, dim=1)


class QnetVec(torch.nn.Module):
    """Two-layer MLP head that maps a state vector to a softmax
    probability distribution over the ``vehicle_num`` vehicles."""

    def __init__(self, state_dim, hidden_dim, vehicle_num):
        super().__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, vehicle_num)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        logits = self.fc2(hidden)
        # Row-wise probabilities over vehicles
        return F.softmax(logits, dim=1)


class DQN:
    """DQN-style agent with two action heads.

    One network (``q_net_node``) scores candidate nodes, a second
    (``q_net_vec``) scores vehicles; each head has its own target
    network and Adam optimizer, synced every ``target_update`` updates.
    Actions are *sampled* from the softmax outputs rather than taken
    greedily (``epsilon`` is stored but currently unused).
    """

    def __init__(self, state_dim, hidden_dim, node_num, vec_num, learning_rate, gamma,
                 epsilon, target_update, device):
        self.node_num = node_num
        self.vec_num = vec_num
        # Online Q networks
        self.q_net_node = QnetNode(state_dim, hidden_dim,
                                   self.node_num).to(device)
        self.q_net_vec = QnetVec(state_dim, hidden_dim, vec_num).to(device)
        # Target networks, periodically synced with the online nets
        self.target_q_net_node = QnetNode(state_dim, hidden_dim,
                                          self.node_num).to(device)
        self.target_q_net_vec = QnetVec(state_dim, hidden_dim,
                                        self.vec_num).to(device)
        # One Adam optimizer per head
        self.optimizer1 = torch.optim.Adam(self.q_net_node.parameters(),
                                           lr=learning_rate)
        self.optimizer2 = torch.optim.Adam(self.q_net_vec.parameters(),
                                           lr=learning_rate)
        self.gamma = gamma  # discount factor
        self.epsilon = epsilon  # epsilon-greedy parameter (stored, not used below)
        self.target_update = target_update  # target-net sync period, in updates
        self.count = 0  # number of update() calls so far
        self.device = device

    def get_node(self, state):
        """Sample a node index from the node head's distribution.

        The first 21 entries of ``state`` are used as an occupancy
        mask: probabilities of positions already marked nonzero are
        zeroed before sampling (Categorical renormalizes the rest).
        Returns a tensor of shape (batch, 1).
        """
        # BUG FIX: original read the module-level global `device`,
        # which breaks if the class is used outside this script.
        state = state.to(self.device)
        probs = self.q_net_node(state)
        # Zero out probabilities of masked nodes in state[:, :21]
        probs = torch.mul(probs, torch.logical_not(state[:, :21]))
        node_dist = torch.distributions.Categorical(probs)
        node = node_dist.sample()
        node = node.unsqueeze(1)
        return node

    def get_vehicle(self, state):
        """Sample a vehicle index from the vehicle head's distribution.

        Returns a tensor of shape (batch, 1).
        """
        state = state.to(self.device)
        probs = self.q_net_vec(state)
        vehicle_dist = torch.distributions.Categorical(probs)
        vehicle = vehicle_dist.sample()
        vehicle = vehicle.unsqueeze(1)
        return vehicle

    def update(self, transition_dict):
        """One TD update for both heads from a batch of transitions.

        NOTE(review): only the *last* state of the batch and the
        *first* node/vec entries are used below, and the ``next_state``
        construction broadcasts a (63, 1) column against a (1, 63) row
        of zeros, producing a (63, 63) matrix — this looks unintended
        and should be confirmed against the original author's intent.
        The update also omits a done-mask in the TD target.
        """
        states = torch.tensor(transition_dict['states'],
                              dtype=torch.float).to(self.device)
        nodes = torch.tensor(transition_dict['nodes']).to(
            self.device)
        vecs = torch.tensor(transition_dict['vecs'],
                            dtype=torch.float).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'],
                               dtype=torch.float).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'],
                                   dtype=torch.float).to(self.device)
        # states: (batch, 1, 63); nodes: (batch, 1)
        state = states[-1]
        node = nodes[0].view(-1, 1)
        # Q(s, a) for the chosen node
        q_values = self.q_net_node(state).gather(1, node)
        # Max Q over next-state actions from the target network
        next_state = torch.tensor(next_states[-1].view(-1, 1)) + torch.tensor(np.zeros((1, 63)), dtype=torch.float)
        max_next_q_values = self.target_q_net_node(next_state).max(1)[0].view(
            -1, 1)
        q_targets = rewards + self.gamma * max_next_q_values  # TD target
        dqn_loss = torch.mean(F.mse_loss(q_values, q_targets))  # MSE loss
        self.optimizer1.zero_grad()  # gradients accumulate by default; reset
        dqn_loss.backward()
        self.optimizer1.step()

        if self.count % self.target_update == 0:
            self.target_q_net_node.load_state_dict(
                self.q_net_node.state_dict())  # sync node target net
        vec = vecs[0].view(-1, 1)
        vec = vec.to(torch.int64)  # gather requires int64 indices
        q_values = self.q_net_vec(state).gather(1, vec)
        # Max Q over next-state actions from the vehicle target network
        max_next_q_values = self.target_q_net_vec(next_state).max(1)[0].view(
            -1, 1)
        q_targets = rewards + self.gamma * max_next_q_values  # TD target
        dqn_loss = torch.mean(F.mse_loss(q_values, q_targets))  # MSE loss
        self.optimizer2.zero_grad()
        dqn_loss.backward()
        self.optimizer2.step()

        if self.count % self.target_update == 0:
            self.target_q_net_vec.load_state_dict(
                self.q_net_vec.state_dict())  # sync vehicle target net
        self.count += 1


# --- Hyperparameters ---
lr = 3e-3  # Adam learning rate for both heads
num_episodes = 500  # episodes per outer iteration block
hidden_dim = 128  # hidden layer width of both Q networks
gamma = 0.98  # discount factor
epsilon = 0.01  # epsilon-greedy parameter (passed through; unused by DQN)
target_update = 10  # target-network sync period, in updates
buffer_size = 10000  # replay buffer capacity
minimal_size = 500  # minimum buffer fill before training starts
batch_size = 64  # replay sample size per update
device = torch.device("cpu")

# NOTE(review): this rebinding shadows the imported `env` module with an
# environment instance; rename one of them if the module is needed later.
env = env.Env()
# random.seed(0)
# np.random.seed(0)
# env.seed(0)
# torch.manual_seed(0)
replay_buffer = ReplayBuffer(buffer_size)
state_dim = 21 * 3  # presumably 21 nodes x 3 features, flattened — TODO confirm
node_num = 21  # size of the node action space
vec_num = 3  # size of the vehicle action space
agent = DQN(state_dim, hidden_dim, node_num, vec_num, lr, gamma, epsilon,
            target_update, device)

# --- Training loop: 30 iteration blocks of num_episodes/10 episodes each ---
return_list = []
for i in range(30):
    with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar:
        for i_episode in range(int(num_episodes / 10)):
            episode_return = 0
            _, state = env.reset()
            state = state.view(1, -1)
            done = False
            while not done:
                state = state.view(1, -1)  # ensure (1, state_dim) each step
                node = agent.get_node(state)
                vec = agent.get_vehicle(state)
                _, next_state, reward, done = env.step(vec, node)
                replay_buffer.add(state, node, vec, reward, next_state)
                state = next_state
                episode_return += reward
                # Train only once the buffer holds enough transitions
                if replay_buffer.size() > minimal_size:
                    b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                    # BUG FIX: the dict previously mapped 'next_states' to
                    # b_r (the vecs) and never used b_d. Positionally,
                    # sample() returns (states, nodes, vecs, rewards,
                    # next_states), so b_d holds the next states.
                    transition_dict = {
                        'states': b_s,
                        'nodes': b_a,
                        'vecs': b_r,
                        'rewards': b_ns,
                        'next_states': b_d,
                    }
                    agent.update(transition_dict)
            return_list.append(episode_return)
            # Refresh the progress-bar stats every 10 episodes
            if (i_episode + 1) % 10 == 0:
                pbar.set_postfix({
                    'episode':
                        '%d' % (num_episodes / 10 * i + i_episode + 1),
                    'return':
                        '%.3f' % np.mean(return_list[-10:])
                })
            pbar.update(1)
