import torch
import torch.nn as nn
import torch.nn.functional as F


class Qnet(torch.nn.Module):
    """Single-hidden-layer Q-network for DQN.

    Although it estimates Q(s, a), the network receives only the state:
    one forward pass emits the value of every action for that state.
    """

    def __init__(self, state_dim, hidden_dim, action_dim):
        super(Qnet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        # ReLU on the hidden layer, linear output head (raw Q-values).
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)


class ConvolutionQnet(torch.nn.Module):
    """DQN Q-network with a convolutional encoder for image observations."""

    def __init__(self, input_shape, action_dim, in_channels=4):
        super(ConvolutionQnet, self).__init__()
        self.input_shape = input_shape
        # Three-conv encoder (classic DQN layout: 8/4, 4/2, 3/1).
        self.features = torch.nn.Sequential(
            nn.Conv2d(in_channels, 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU()
        )
        # Fully connected head mapping flattened features to per-action values.
        self.fc = nn.Sequential(
            nn.Linear(self.feature_size, 512),
            nn.ReLU(),
            nn.Linear(512, action_dim)
        )

    def forward(self, x):
        batch = x.size(0)
        flat = self.features(x).view(batch, -1)
        return self.fc(flat)

    @property
    def feature_size(self):
        # Discover the flattened encoder width with a dummy forward pass.
        probe = torch.zeros(1, *self.input_shape)
        return self.features(probe).view(1, -1).size(1)


class VAnet(torch.nn.Module):
    """Dueling DQN Q-network with one shared hidden layer.

    A shared hidden layer feeds two heads: the advantage head A(s, a)
    and the state-value head V(s). Q is recombined from both.
    """

    def __init__(self, state_dim, hidden_dim, action_dim):
        super(VAnet, self).__init__()
        # Shared feature layer used by both heads.
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        # Advantage head: one output per action.
        self.fc_A = torch.nn.Linear(hidden_dim, action_dim)
        # State-value head: a single scalar per state.
        self.fc_V = torch.nn.Linear(hidden_dim, 1)

    def forward(self, x):
        # Compute the shared hidden features once. (The original evaluated
        # F.relu(self.fc1(x)) separately for each head, doubling the cost
        # of the shared layer for an identical result.)
        h = F.relu(self.fc1(x))
        A = self.fc_A(h)
        V = self.fc_V(h)
        # Subtracting the mean advantage makes the V/A decomposition unique
        # and stabilizes training.
        Q = V + A - A.mean(dim=1, keepdim=True)
        return Q


class ConvolutionNet(torch.nn.Module):
    """Base class providing a shared convolutional feature encoder."""

    def __init__(self, in_channels=4):
        super(ConvolutionNet, self).__init__()
        # Classic DQN encoder: three conv layers, each followed by ReLU.
        conv_stack = [
            nn.Conv2d(in_channels, 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU(),
        ]
        self.features = torch.nn.Sequential(*conv_stack)


class ConvolutionVAnet(ConvolutionNet):
    """Dueling DQN Q-network with the shared convolutional encoder."""

    def __init__(self, input_shape, action_dim, in_channels=4):
        super(ConvolutionVAnet, self).__init__(in_channels)
        self.input_shape = input_shape
        # Shared fully connected trunk on top of the conv features.
        self.fc = nn.Sequential(
            nn.Linear(self.feature_size, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
        )
        # Advantage head (per action) and state-value head (scalar).
        self.fc_A = nn.Linear(256, action_dim)
        self.fc_V = nn.Linear(256, 1)

    def forward(self, x):
        batch = x.size(0)
        h = self.features(x).view(batch, -1)
        h = self.fc(h)
        A = self.fc_A(h)
        V = self.fc_V(h)
        # Mean-centered advantage keeps the V/A decomposition identifiable.
        return A + V - A.mean(1).view(-1, 1)

    @property
    def feature_size(self):
        """Flattened width of the final conv layer, for wiring the Linear trunk."""
        probe = self.features(torch.zeros(1, *self.input_shape))
        return probe.view(1, -1).size(1)


class PolicyNet(torch.nn.Module):
    """Discrete-action policy network usable with PPO."""

    def __init__(self, state_dim, hidden_state, action_dim):
        super(PolicyNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_state)
        self.fc2 = torch.nn.Linear(hidden_state, action_dim)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        logits = self.fc2(hidden)
        # Softmax turns logits into the action distribution P(a|s),
        # i.e. the selection probability of each action.
        return F.softmax(logits, dim=1)


class PolicyNetDeterministic(torch.nn.Module):
    """Deterministic policy network for DDPG."""

    def __init__(self, state_dim, hidden_state, action_dim, action_bound):
        super(PolicyNetDeterministic, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_state)
        self.fc2 = torch.nn.Linear(hidden_state, hidden_state)
        self.fc3 = torch.nn.Linear(hidden_state, action_dim)
        # Largest action magnitude the environment accepts.
        self.action_bound = action_bound

    def forward(self, x):
        h = F.relu(self.fc1(x))
        h = F.relu(self.fc2(h))
        # tanh squashes to [-1, 1]; scaling maps back to the action range.
        squashed = torch.tanh(self.fc3(h))
        return squashed * self.action_bound

    def init_weights(self, m: torch.nn.Module):
        # Intended for Module.apply: uniform weights, small constant bias.
        if type(m) == nn.Linear:
            torch.nn.init.uniform_(m.weight, a=-0.1, b=0.1)
            m.bias.data.fill_(0.01)


class PolicyNetContinuous(torch.nn.Module):
    """Continuous-action policy network (PPO): outputs a Gaussian's mu/std."""

    def __init__(self, state_dim, hidden_state, action_dim):
        super(PolicyNetContinuous, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_state)
        self.fc_mu = torch.nn.Linear(hidden_state, action_dim)
        self.fc_std = torch.nn.Linear(hidden_state, action_dim)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        # Mean squashed by tanh and scaled into [-2, 2].
        mu = 2.0 * torch.tanh(self.fc_mu(hidden))
        # softplus(x) = log(1 + e^x), a smooth ReLU, keeps std positive.
        std = F.softplus(self.fc_std(hidden))
        return mu, std


class SACPolicyNet(torch.nn.Module):
    """SAC policy network (continuous actions).

    Samples a squashed-Gaussian action and returns it together with its
    per-dimension log-density under the tanh-transformed distribution.
    """

    def __init__(self, state_dim, hidden_dim, action_dim, action_bound):
        super(SACPolicyNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc_mu = torch.nn.Linear(hidden_dim, action_dim)
        self.fc_std = torch.nn.Linear(hidden_dim, action_dim)
        # Largest action magnitude the environment accepts.
        self.action_bound = action_bound

    def forward(self, x):
        x = F.relu(self.fc1(x))
        mu = self.fc_mu(x)
        # softplus keeps the standard deviation strictly positive.
        std = F.softplus(self.fc_std(x))
        # Gaussian over the pre-squash sample u.
        dist = torch.distributions.Normal(mu, std)
        # Reparameterization trick so gradients flow through the sample.
        u = dist.rsample()
        action = torch.tanh(u)
        # Change-of-variables correction for the tanh squash:
        #   log pi(a|s) = log N(u; mu, std) - log(1 - tanh(u)^2 + eps).
        # BUG FIX: the original computed torch.log(1 - torch.tanh(action)^2),
        # applying tanh twice (action is already tanh(u)), which yields a
        # wrong density correction. `action` here IS tanh(u).
        log_prob = dist.log_prob(u) - torch.log(1 - action.pow(2) + 1e-7)
        # Scale the squashed action back into the environment's range.
        action = action * self.action_bound
        return action, log_prob


class SACQValueNet(torch.nn.Module):
    """SAC Q-value network (continuous actions): maps (s, a) to a scalar."""

    def __init__(self, state_dim, hidden_dim, action_dim):
        super(SACQValueNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim + action_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim)
        self.fc_out = torch.nn.Linear(hidden_dim, 1)

    def forward(self, state, action):
        # Condition on the action by concatenating it to the state.
        joint = torch.cat([state, action], dim=1)
        h = F.relu(self.fc1(joint))
        h = F.relu(self.fc2(h))
        return self.fc_out(h)


class ValueNet(torch.nn.Module):
    """Critic network for Actor-Critic: estimates the state value V(s)."""

    def __init__(self, state_dim, hidden_dim):
        super(ValueNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, 1)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)


class QValueNet(torch.nn.Module):
    """Q-network for DDPG (deep deterministic policy gradient).

    Since the policy is deterministic, the output is a single scalar value
    for the given state-action pair.
    """

    def __init__(self, state_dim, hidden_dim, action_dim):
        super(QValueNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim + action_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = torch.nn.Linear(hidden_dim, 1)

    def forward(self, state, action):
        # Concatenate state and action along the feature dimension.
        joint = torch.cat([state, action], dim=1)
        h = F.relu(self.fc1(joint))
        h = F.relu(self.fc2(h))
        return self.fc3(h)


class TwoLayerFC(torch.nn.Module):
    """Simple fully connected network with two hidden layers.

    The hidden activation and the output transform are both pluggable;
    the output transform defaults to the identity.
    """

    def __init__(self, num_in, num_out, hidden_dim, activation=F.relu, out_fn=lambda x: x):
        super(TwoLayerFC, self).__init__()
        self.fc1 = torch.nn.Linear(num_in, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = torch.nn.Linear(hidden_dim, num_out)
        # Configurable nonlinearity and final output transform.
        self.activation = activation
        self.out_fn = out_fn

    def forward(self, x):
        h = self.activation(self.fc1(x))
        h = self.activation(self.fc2(h))
        return self.out_fn(self.fc3(h))
