import torch
import torch.nn as nn
import torch.nn.functional as F
from network.noisy_layer import NoisyLinear


class NaiveNet(nn.Module):
    """Plain two-layer MLP head over a sequence input.

    Expects input of shape (batch, seq_len, n_feature); the linear layers are
    applied per-timestep and only the final timestep's action values are
    returned, shape (batch, n_actions).
    """
    def __init__(self, n_feature, n_neurons, n_actions):
        super(NaiveNet, self).__init__()
        self.fc1 = nn.Linear(n_feature, n_neurons)
        self.fc1.weight.data.normal_(0, 0.1)  # small-variance Gaussian init
        self.out = nn.Linear(n_neurons, n_actions)
        self.out.weight.data.normal_(0, 0.1)  # small-variance Gaussian init

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        q_all = self.out(hidden)
        # keep only the last timestep along the sequence axis
        return q_all[:, -1, :]


class TwoLSTMNet(nn.Module):
    """Multi-input, single-output network built from two parallel LSTMs.

    Feature columns [0:2] and [2:] of the input (batch, seq_len, n_feature)
    go through separate LSTMs; their final hidden states are concatenated and
    projected to a softmax distribution over actions, shape (batch, n_actions).
    """
    def __init__(self, n_feature, n_neurons, n_actions):
        super(TwoLSTMNet, self).__init__()
        self.lstm1 = nn.LSTM(input_size=2, num_layers=1, hidden_size=n_neurons, batch_first=True)
        self.lstm2 = nn.LSTM(input_size=n_feature - 2, num_layers=1, hidden_size=n_neurons, batch_first=True)
        self.dense1 = nn.Linear(in_features=n_neurons * 2, out_features=n_actions)

    def forward(self, x):
        # run each feature group through its own LSTM
        seq_a, _ = self.lstm1(x[:, :, :2])
        seq_b, _ = self.lstm2(x[:, :, 2:])
        # take the last timestep from each stream and join on the feature axis
        joined = torch.cat([seq_a[:, -1, :], seq_b[:, -1, :]], 1)
        return F.softmax(self.dense1(joined), dim=1)


class LSTMNet(nn.Module):
    """Single-LSTM network: sequence in, softmax action distribution out.

    Input is (batch, seq_len, n_feature); the LSTM's output at the final
    timestep is projected to (batch, n_actions) and softmax-normalized.
    """
    def __init__(self, n_feature, n_neurons, n_actions, n_layer=1):
        super(LSTMNet, self).__init__()
        self.lstm = nn.LSTM(input_size=n_feature, num_layers=n_layer, hidden_size=n_neurons, batch_first=True)
        self.dense = nn.Linear(in_features=n_neurons, out_features=n_actions)

    def forward(self, x):
        seq_out, _ = self.lstm(x)
        last_step = seq_out[:, -1, :]
        return F.softmax(self.dense(last_step), dim=1)


class CNNLSTMNet(nn.Module):
    """Conv1d + max-pool front end feeding an LSTM with a softmax head.

    Input is (batch, n_length, n_feature). The convolution treats the time
    axis as channels and halves it; pooling shrinks the feature axis, and the
    LSTM's input size is pre-computed to match that pooled width. Returns a
    softmax over actions, shape (batch, n_actions).
    """
    def __init__(self, n_length, n_feature, n_neurons, n_actions, n_layer=1, kernel_size=5, pool_size=2):
        super(CNNLSTMNet, self).__init__()
        self.cnn = nn.Conv1d(in_channels=n_length, out_channels=n_length // 2, kernel_size=kernel_size)
        self.max_pooling = nn.MaxPool1d(kernel_size=pool_size)
        # LSTM input width matches the conv output (n_feature - k + 1) after pooling
        self.lstm = nn.LSTM(input_size=(n_feature - kernel_size + 1) // pool_size, num_layers=n_layer,
                            hidden_size=n_neurons, batch_first=True)
        self.dense = nn.Linear(in_features=n_neurons, out_features=n_actions)

    def forward(self, x):
        pooled = self.max_pooling(self.cnn(x))
        seq_out, _ = self.lstm(pooled)
        final_state = seq_out[:, -1, :]
        return F.softmax(self.dense(final_state), dim=1)


class DuelingNoisyLSTMNet(nn.Module):
    """Dueling Q-network with noisy linear streams on top of an LSTM extractor.

    Input is (batch, seq_len, n_feature); the LSTM output at the final
    timestep feeds a scalar value stream V(s) and a per-action advantage
    stream A(s, a), combined as Q = V + A - mean(A). Returns (batch, n_actions).
    """
    def __init__(self, n_feature, n_neurons, n_actions, n_layer=1):
        super(DuelingNoisyLSTMNet, self).__init__()
        self.feature_lstm = nn.LSTM(input_size=n_feature, num_layers=n_layer, hidden_size=n_neurons, batch_first=True)

        # advantage stream: per-action advantage A(s, a)
        self.advantage_layer = NoisyLinear(in_features=n_neurons, out_features=n_actions)

        # value stream: scalar state value V(s)
        self.value_layer = NoisyLinear(in_features=n_neurons, out_features=1)

    def forward(self, x):
        out, _ = self.feature_lstm(x)
        feature = out[:, -1, :]

        # Bug fix: the previous code wrapped both streams in F.softmax(dim=1).
        # Softmax over the value stream's single output made `value` a constant
        # 1.0 (softmax of a size-1 dimension), silently disabling the value
        # stream. The standard dueling aggregation uses the raw stream outputs.
        value = self.value_layer(feature)
        advantage = self.advantage_layer(feature)

        # Q(s, a) = V(s) + A(s, a) - mean_a A(s, a); mean-centering keeps the
        # decomposition identifiable.
        q = value + advantage - advantage.mean(dim=-1, keepdim=True)
        return q

    def reset_noise(self):
        """Resample the noise in both noisy streams (NoisyNet exploration)."""
        self.advantage_layer.reset_noise()
        self.value_layer.reset_noise()


if __name__ == '__main__':
    # Smoke test: batch of 2 sequences, 30 timesteps, 60 features each.
    sample = torch.rand((2, 30, 60))
    lstm_net = DuelingNoisyLSTMNet(n_feature=60, n_neurons=1, n_actions=1)
    print(lstm_net(sample))

    # cnn_net = CNNLSTMNet(n_length=30, n_feature=60, n_neurons=1, n_actions=1)
    # print(cnn_net(sample))
