import torch.nn as nn
import torch
import numpy as np
from collections import deque
import random
from src.Utils.MyUtil import init_weights
from src.TiDE.TiDE import ResidualBlock
from src.RL.utils import ReplayBuffer


class QNetworkWithLSTM(nn.Module):
    """Dueling Q-network: ResidualBlock encoder -> two stacked LSTMs -> value/advantage heads.

    Args:
        state_width: per-timestep feature width of the input sequence.
        action_size: number of discrete actions (width of the advantage head).
        dropout: dropout rate used in the encoder, attention, and output stacks.
    """

    def __init__(self, state_width=18, action_size=2, dropout=0.05):
        super(QNetworkWithLSTM, self).__init__()
        # Per-timestep feature encoder (project-local ResidualBlock).
        self.input = nn.Sequential(
            ResidualBlock(state_width, 16, dropout=dropout)
        )
        self.lstm1 = nn.LSTM(input_size=16, hidden_size=32, num_layers=1, batch_first=True)
        self.lstm2 = nn.LSTM(input_size=32, hidden_size=16, num_layers=1, batch_first=True)
        # NOTE(review): attention is not used in forward() (its application is
        # commented out there); kept so existing checkpoints containing these
        # parameters still load (callers use load_state_dict(strict=False)).
        self.attention = nn.Sequential(
            nn.Linear(16, 32),
            nn.ReLU(),
            nn.Linear(32, 1),
            nn.Softmax(dim=1),
            nn.Dropout(dropout)
        )
        self.output = nn.Sequential(
            nn.Linear(16, 16),
            nn.LeakyReLU(),
            nn.Dropout(dropout)
        )
        # Dueling heads: scalar state value and per-action advantages.
        self.value = nn.Sequential(
            nn.Linear(16, 1)
        )
        self.advantage = nn.Sequential(
            nn.Linear(16, action_size),
        )

    def forward(self, x, position):
        """Return Q-values of shape (batch, action_size).

        Args:
            x: assumed (batch, seq_len, state_width) — TODO confirm with callers
                (batch_first=True on both LSTMs and the [:, -1, :] slice imply it).
            position: currently unused — the concatenation with the LSTM output
                is commented out; the parameter is kept for interface compatibility.
        """
        x = self.input(x)
        x, _ = self.lstm1(x)
        x, _ = self.lstm2(x)
        x = x[:, -1, :]  # keep only the last timestep's hidden state
        x = self.output(x)
        val = self.value(x)
        adv = self.advantage(x)
        # Dueling aggregation: subtract the per-sample mean advantage so that
        # Q = V + (A - mean(A)) is identifiable. The original used
        # torch.mean(adv) — a scalar mean over the whole batch — which couples
        # unrelated samples in a batch; the mean must be taken per row.
        return val + adv - adv.mean(dim=1, keepdim=True)


# DQN agent class (maintains two networks: evaluation and target)
class DQN(object):
    """DQN agent with a dueling Q-network and a soft-updated target network.

    Hyperparameters live as class attributes and are mutated in place as
    training progresses (epsilon decay, soft-update-rate decay, shared loss
    history) — all DQN instances share this state.
    """
    STATE_WIDTH = 13
    GAMMA = 0.95                  # discount factor
    MEMORY_CAPACITY = 1500        # replay buffer size
    epsilon = 0.8                 # exploration probability; decays each learn() call
    EPSILON_DECAY = 0.999
    BATCH_SIZE = 64
    LR = 0.0005
    N_ACTIONS = 2
    TARGET_REPLACE_ITER = 800     # learn steps between target-network soft updates
    MIN_EPSILON = 0.05
    loss_list = []                # shared running loss history (class-level, intentional)
    MIN_SOFT_UPDATE_RATE = 0.6
    SOFT_UPDATE_DECAY = 0.9
    soft_update_rate = 0.9        # tau for soft_update; decays toward the floor above

    def __init__(self, model_weight_path=None, QNetWork=QNetworkWithLSTM):
        """Build the evaluation and target networks on the GPU.

        Args:
            model_weight_path: optional path to a saved state dict, loaded
                into both networks with strict=False (partial checkpoints OK).
            QNetWork: network class used for both networks.
        """
        self.eval_net = QNetWork(state_width=DQN.STATE_WIDTH,
                                 action_size=DQN.N_ACTIONS).cuda()
        self.target_net = QNetWork(state_width=DQN.STATE_WIDTH,
                                   action_size=DQN.N_ACTIONS).cuda()
        # BUGFIX: initialize BEFORE loading. The original called init_weights()
        # after load_state_dict(), which clobbered the freshly loaded weights
        # and made model_weight_path effectively a no-op.
        init_weights(self.eval_net)
        init_weights(self.target_net)
        if model_weight_path is not None:
            state_dic = torch.load(model_weight_path)
            self.eval_net.load_state_dict(state_dic, strict=False)
            self.target_net.load_state_dict(state_dic, strict=False)
        self.learn_step_counter = 0  # counts learn() calls; gates target updates

        self.replay_buffer = ReplayBuffer(DQN.MEMORY_CAPACITY)

        # Only the evaluation network is trained; the target net is updated
        # via soft_update().
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=DQN.LR)
        self.loss_func = nn.MSELoss().cuda()

    def choose_action(self, x):
        """Epsilon-greedy action selection.

        Args:
            x: tuple of network inputs, unpacked into eval_net(*x).
        Returns:
            The chosen action index (greedy with probability 1 - epsilon).
        """
        if np.random.uniform() > DQN.epsilon:  # exploit: pick argmax-Q action
            with torch.no_grad():  # inference only; don't build an autograd graph
                actions_value = self.eval_net(*x)
            action = torch.max(actions_value, 1)[1].data.cpu().numpy()
            action = action[0]  # single-sample batch -> scalar action index
        else:  # explore: uniform random action
            action = np.random.randint(0, DQN.N_ACTIONS)
        return action

    def store_transition(self, s, a, r, s_, done):
        """Append one (state, action, reward, next_state, done) transition."""
        self.replay_buffer.add(s, a, r, s_, done)

    def learn(self):
        """Run one optimization step; no-op until the replay buffer is full."""
        if not self.replay_buffer.is_full():
            return

        # Periodic soft update of the target network; the interpolation rate
        # itself decays toward MIN_SOFT_UPDATE_RATE (fires on the first call
        # and then every TARGET_REPLACE_ITER learn steps).
        if self.learn_step_counter % DQN.TARGET_REPLACE_ITER == 0:
            soft_update(local_network=self.eval_net, target_network=self.target_net, tau=DQN.soft_update_rate)
            DQN.soft_update_rate = max(DQN.MIN_SOFT_UPDATE_RATE, DQN.soft_update_rate * DQN.SOFT_UPDATE_DECAY)
        self.learn_step_counter += 1

        self.optimizer.zero_grad()
        b_s, b_a, b_r, b_s_, done = self.replay_buffer.sample(DQN.BATCH_SIZE)
        # Q(s, a) for the actions actually taken in the sampled batch.
        q_eval = self.eval_net(*b_s).gather(1, b_a)
        # Bootstrap targets come from the frozen target network (no gradients).
        q_next = self.target_net(*b_s_).detach()
        # Bellman target; (1 - done) zeroes the bootstrap term on terminal steps.
        q_target = b_r + (1 - done) * (DQN.GAMMA * q_next.max(1)[0].view(DQN.BATCH_SIZE, 1))

        # Local import — presumably to avoid a circular import with EasyNet.
        from src.RL.EasyNet import EasyQNet
        loss = self.loss_func(q_eval, q_target)
        if isinstance(self.eval_net, EasyQNet):
            loss += self.eval_net.l1_penalty()  # EasyQNet adds L1 regularization
        DQN.loss_list.append(loss.item())
        # Periodic progress report: mean of the last 1000 losses, roughly
        # every 1007 learn steps.
        if len(DQN.loss_list) % 1007 == 1001:
            print(f"Loss:{np.mean(DQN.loss_list[-1000:]):.5f}")
        loss.backward()
        self.optimizer.step()

        DQN.epsilon = max(DQN.MIN_EPSILON, DQN.epsilon * DQN.EPSILON_DECAY)

    def save(self, path):
        """Persist the target network's state dict to *path*."""
        torch.save(self.target_net.state_dict(), path)

    def eval(self):
        """Switch to evaluation mode: greedy policy, both nets in eval() mode."""
        DQN.no_epsilon()
        self.target_net.eval()
        self.eval_net.eval()

    @classmethod
    def no_epsilon(cls):
        """Disable exploration entirely (epsilon pinned to zero)."""
        cls.epsilon = 0
        cls.MIN_EPSILON = 0

    @classmethod
    def set_soft_update(cls, tar):
        """Pin the soft-update rate and its decay floor to *tar*."""
        cls.MIN_SOFT_UPDATE_RATE = tar
        cls.soft_update_rate = tar


def soft_update(local_network, target_network, tau):
    """Polyak-average local parameters into the target network, in place.

    For each parameter pair: target <- tau * local + (1 - tau) * target.
    """
    param_pairs = zip(local_network.parameters(), target_network.parameters())
    for local_param, target_param in param_pairs:
        target_param.data.mul_(1.0 - tau).add_(local_param.data, alpha=tau)
