import torch
import torch.nn as nn


class Generator(nn.Module):
    """Generate a sequence of synthetic feature vectors from latent noise.

    Pipeline: Linear(generator_input_size -> 64) -> ReLU -> Dropout
    -> 3-layer LSTM(64 -> 48) -> Linear(48 -> 64) -> ReLU -> Dropout
    -> Linear(64 -> num_features).

    NOTE(review): the LSTM uses PyTorch's default ``batch_first=False``,
    so inputs are presumably shaped (seq, batch, generator_input_size) —
    confirm against callers. ``num_historical_days`` is stored but not
    used inside this class.
    """

    def __init__(self,
                 generator_input_size=200,
                 num_historical_days=20,
                 num_features=9,
                 dropout=0,
                 **kwargs):
        super(Generator, self).__init__()
        self.num_historical_days = num_historical_days
        # Project the latent input into the LSTM's 64-wide input space.
        self.fc1 = nn.Sequential(
            nn.Linear(generator_input_size, 64),
            nn.ReLU(),
            nn.Dropout(dropout)
        )
        # Bare nn.LSTM, not wrapped in nn.Sequential: an LSTM returns an
        # (output, (h_n, c_n)) tuple that Sequential cannot forward to any
        # later module, so the wrapper added nothing but fragility.
        # NOTE: this renames state_dict keys from "lstm.0.*" to "lstm.*";
        # old checkpoints need their keys remapped on load.
        self.lstm = nn.LSTM(64, hidden_size=48, num_layers=3, dropout=dropout)
        # Expand the LSTM's hidden representation before the output head.
        self.fc2 = nn.Sequential(
            nn.Linear(48, 64),
            nn.ReLU(),
            nn.Dropout(dropout)
        )
        # Final projection to the generated feature dimension.
        self.fc3 = nn.Sequential(
            nn.Linear(64, num_features)
        )

    def forward(self, x):
        """Map latent input ``x`` to generated features.

        Returns a tensor with the same leading (seq, batch) dimensions as
        the LSTM output and a trailing dimension of ``num_features``.
        """
        x = self.fc1(x)
        x, _ = self.lstm(x)  # keep per-step outputs; drop final (h, c) state
        x = self.fc2(x)
        x = self.fc3(x)
        return x


class Discriminator(nn.Module):
    """Score feature sequences as real or fake.

    Pipeline: Linear(num_feature -> LSTM_features) -> FeatureModel
    encoder (LSTM + attention, defined elsewhere in this file) -> linear
    head. In standard-GAN mode the head ends in a Sigmoid (probability in
    (0, 1)); with ``isWGAN=True`` the raw linear score is returned, since
    a WGAN critic requires an unbounded output.
    """

    def __init__(self, num_feature=5,
                 LSTM_features=64,
                 LSTM_hidden_size=256,
                 LSTM_num_layers=3,
                 dropout=0,
                 discriminator_features=128,
                 isWGAN=False,
                 **kwargs):
        super(Discriminator, self).__init__()
        # Embed raw per-step features into the encoder's input width.
        self.input = nn.Sequential(
            nn.Linear(num_feature, LSTM_features)
        )

        self.feature_model = FeatureModel(LSTM_features=LSTM_features,
                                          LSTM_hidden_size=LSTM_hidden_size,
                                          LSTM_num_layers=LSTM_num_layers,
                                          dropout=dropout,
                                          discriminator_features=discriminator_features)
        # WGAN critics must emit an unconstrained real value; a plain GAN
        # discriminator squashes the score through a Sigmoid.
        if isWGAN:
            self.fc2 = nn.Sequential(
                nn.Linear(discriminator_features, 1)
            )
        else:
            self.fc2 = nn.Sequential(
                nn.Linear(discriminator_features, 1),
                nn.Sigmoid()
            )

    def forward(self, x):
        """Return one score per sample, reshaped to (-1, 1)."""
        x = self.input(x)
        encode = self.feature_model(x)
        x = self.fc2(encode)
        # Flatten any leading dimensions so every sample yields one score.
        return torch.reshape(x, [-1, 1])


class FeatureModel(nn.Module):
    """LSTM encoder with learned attention pooling.

    The LSTM output is weighted by a per-step scalar attention score,
    summed over the second-to-last axis, and projected to
    ``discriminator_features``. The trailing singleton axis left by
    ``keepdim=True`` is squeezed away before returning.

    NOTE(review): ``nn.Softmax(dim=1)`` normalizes over axis 1 and the
    forward sums over ``len(shape) - 2`` (also axis 1 for 3-D input).
    Axis 1 is the time axis only for batch-first input, but the LSTM
    here uses its default ``batch_first=False`` (seq-first) — confirm
    the intended input layout against callers before changing either
    axis. Also, Dropout applied *after* Softmax zeroes some attention
    weights so they no longer sum to 1 — possibly intentional
    regularization, but worth confirming.
    """

    def __init__(self, LSTM_features=64,
                 LSTM_hidden_size=256,
                 LSTM_num_layers=3,
                 discriminator_features=128,
                 dropout=0,
                 **kwargs
                 ):
        super(FeatureModel, self).__init__()
        # Bare nn.LSTM, not wrapped in nn.Sequential: an LSTM returns an
        # (output, (h_n, c_n)) tuple that Sequential cannot forward, so
        # the wrapper was useless indirection.
        # NOTE: this renames state_dict keys from "lstm.0.*" to "lstm.*";
        # old checkpoints need their keys remapped on load.
        self.lstm = nn.LSTM(LSTM_features, hidden_size=LSTM_hidden_size,
                            num_layers=LSTM_num_layers)
        # Per-timestep scalar attention score, normalized over dim 1
        # (see class NOTE above about axis semantics).
        self.attention = nn.Sequential(
            nn.Linear(LSTM_hidden_size, 512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.Softmax(dim=1),
            nn.Dropout(dropout)
        )
        # Project the pooled hidden state to the encoder's output width.
        self.fc1 = nn.Sequential(
            nn.Linear(LSTM_hidden_size, discriminator_features)
        )

    def forward(self, x):
        """Encode ``x`` into attention-pooled feature vectors."""
        x, _ = self.lstm(x)  # keep per-step outputs; drop final (h, c) state
        weight = self.attention(x)  # broadcastable (..., 1) scores
        # Attention-weighted sum over the second-to-last axis.
        x = torch.sum(weight * x, dim=len(x.shape) - 2, keepdim=True)
        x = self.fc1(x)
        return torch.squeeze(x)
