import torch
import torch.nn as nn
import torch.nn.functional as F


class Qnet(torch.nn.Module):
    """Fully-connected Q-network for DQN.

    Although the network estimates Q(s, a), it takes only the state as
    input: a single forward pass returns the Q-value of every action for
    that state, which is what DQN's argmax/TD-target computations need.
    """

    def __init__(self, state_dim, hidden_dim, action_dim, hidden_layers, activation_layer=torch.nn.ReLU):
        """Build the MLP.

        :param state_dim: dimensionality of the state input
        :param hidden_dim: number of neurons in each hidden layer
        :param action_dim: number of discrete actions (output width)
        :param hidden_layers: number of extra hidden Linear+activation pairs
        :param activation_layer: activation class to instantiate, ReLU by default
        """
        super(Qnet, self).__init__()
        # Input projection followed by its activation.
        modules = [torch.nn.Linear(state_dim, hidden_dim), activation_layer()]
        # Additional hidden layers, each with its own activation.
        for _ in range(hidden_layers):
            modules += [torch.nn.Linear(hidden_dim, hidden_dim), activation_layer()]
        # Output head: one Q-value per action, no final activation.
        modules.append(torch.nn.Linear(hidden_dim, action_dim))
        self.linear_layers = torch.nn.Sequential(*modules)

    def forward(self, x):
        """Return Q-values of shape (batch, action_dim) for states x."""
        return self.linear_layers(x)


class VAnet(torch.nn.Module):
    """Q-network for Dueling DQN: a shared trunk feeding an advantage head
    (A-network) and a state-value head (V-network), combined into Q-values.
    """

    def __init__(self, state_dim, hidden_dim, action_dim, hidden_layers, activation_layer=torch.nn.ReLU):
        """Build trunk and heads.

        :param state_dim: dimensionality of the state input
        :param hidden_dim: number of neurons in each hidden layer
        :param action_dim: number of discrete actions
        :param hidden_layers: number of extra hidden layers in the shared trunk
        :param activation_layer: activation class to instantiate, ReLU by default
        """
        super(VAnet, self).__init__()

        # Shared trunk: input projection plus hidden layers, all activated.
        layers = []
        layers.append(torch.nn.Linear(state_dim, hidden_dim))
        layers.append(activation_layer())
        for i in range(hidden_layers):
            layers.append(torch.nn.Linear(hidden_dim, hidden_dim))
            layers.append(activation_layer())
        self.linear_layers = torch.nn.Sequential(*layers)

        # Advantage head: A(s, a) for each action.
        self.fc_A = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            torch.nn.Linear(hidden_dim, action_dim)
        )
        # State-value head: scalar V(s).
        self.fc_V = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            torch.nn.Linear(hidden_dim, 1)
        )

    def forward(self, x):
        # Run the shared trunk ONCE and feed both heads from the same
        # features (the previous version evaluated the trunk twice, doubling
        # its cost for identical results).
        features = self.linear_layers(x)
        A = self.fc_A(features)
        V = self.fc_V(features)
        # Q = V + A - mean_a(A): subtracting the per-state mean advantage
        # makes the V/A decomposition identifiable and stabilizes training.
        Q = V + A - A.mean(dim=1, keepdim=True)
        return Q


class PolicyNetContinuous(torch.nn.Module):
    """Policy network for continuous action spaces (PPO): outputs the mean
    and standard deviation of a Gaussian action distribution.
    """

    def __init__(self, state_dim, hidden_state, action_dim):
        """Build a one-hidden-layer network with separate mu/std heads.

        :param state_dim: dimensionality of the state input
        :param hidden_state: number of hidden neurons
        :param action_dim: dimensionality of the action
        """
        super(PolicyNetContinuous, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_state)
        self.fc_mu = torch.nn.Linear(hidden_state, action_dim)
        self.fc_std = torch.nn.Linear(hidden_state, action_dim)

    def forward(self, x):
        """Return (mu, std) of the action distribution for states x."""
        hidden = F.relu(self.fc1(x))
        # tanh bounds the mean to (-1, 1); the factor 2 rescales it to (-2, 2)
        mean = torch.tanh(self.fc_mu(hidden)) * 2.0
        # softplus = log(1 + e^x), a smooth ReLU, keeps the std strictly positive
        deviation = F.softplus(self.fc_std(hidden))
        return mean, deviation


class PolicyNet(torch.nn.Module):
    """Policy network usable with PPO: maps a batch of states to a
    probability distribution over discrete actions.
    """

    def __init__(self, state_dim, hidden_state, action_dim, hidden_layers, activation_layer=torch.nn.ReLU):
        """Build the MLP.

        :param state_dim: dimensionality of the state input
        :param hidden_state: number of neurons in each hidden layer
        :param action_dim: number of discrete actions
        :param hidden_layers: number of extra hidden Linear+activation pairs
        :param activation_layer: activation class to instantiate, ReLU by default
        """
        super(PolicyNet, self).__init__()
        blocks = [torch.nn.Linear(state_dim, hidden_state), activation_layer()]
        blocks.extend(
            module
            for _ in range(hidden_layers)
            for module in (torch.nn.Linear(hidden_state, hidden_state), activation_layer())
        )
        blocks.append(torch.nn.Linear(hidden_state, action_dim))
        self.linear_layers = torch.nn.Sequential(*blocks)

    def forward(self, x):
        logits = self.linear_layers(x)
        # softmax over the action axis yields P(a|s) for each state in the batch
        return F.softmax(logits, dim=1)


class ValueNet(torch.nn.Module):
    """Critic network for actor-critic methods: maps a state to a scalar
    state-value estimate V(s).
    """

    def __init__(self, state_dim, hidden_dim, hidden_layers, activation_layer=torch.nn.ReLU):
        """Build the MLP.

        :param state_dim: dimensionality of the state input
        :param hidden_dim: number of neurons in each hidden layer
        :param hidden_layers: number of extra hidden Linear+activation pairs
        :param activation_layer: activation class to instantiate, ReLU by default
        """
        super(ValueNet, self).__init__()

        stack = [torch.nn.Linear(state_dim, hidden_dim), activation_layer()]
        for _ in range(hidden_layers):
            stack.append(torch.nn.Linear(hidden_dim, hidden_dim))
            stack.append(activation_layer())
        # Single scalar output: the value of the state.
        stack.append(torch.nn.Linear(hidden_dim, 1))
        self.linear_layers = torch.nn.Sequential(*stack)

    def forward(self, x):
        """Return V(s) as a (batch, 1) tensor."""
        return self.linear_layers(x)
