import torch
import torch.nn as nn
import os
import torch.nn.functional as F
from convlstm import ConvLSTMCell, ConvLSTM

# The VAE Encoder in SOGMP has a stack of residual connections in front of the convlstm block

# Presumably the number of lidar points per scan — unused in this file; TODO confirm against callers.
POINTS = 1080
# Side length of the square input image (see the commented-out reshape in EncoderConvLSTM.forward).
IMG_SIZE = 64 
# Frames per input sequence (see the commented-out reshape in EncoderConvLSTM.forward).
SEQ_LEN = 10

# Define the Residual block
class Residual(nn.Module):
    def __init__(self, in_channels, num_hiddens, num_residual_hiddens):
        self.in_channels = in_channels
        self.num_hiddens = num_hiddens
        self.num_residual_hiddens = num_residual_hiddens
        self.block = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(in_channels=self.in_channels,
                      out_channels=self.num_residual_hiddens,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(num_residual_hiddens),
            nn.ReLU(True),
            nn.Conv2d(in_channels=self.num_residual_hiddens,
                      out_channels=self.num_hiddens,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(num_hiddens)
        )

    def forward(self, x):
        return x + self.block(x)
    
# Define the Residual stacks
class ResidualStack(nn.Module):
    def __init__(self, in_channels, num_hiddens, num_residual_hiddens, num_residual_layers):
        self.in_channels = in_channels
        self.num_hiddens = num_hiddens
        self.num_residual_hiddens = num_residual_hiddens
        self.num_residual_layers = num_residual_layers
        self.stacks = nn.ModuleList([Residual(in_channels, num_hiddens, num_residual_layers)
                                     for _ in range(num_residual_layers)])
        
    def forward(self, x):
        for i in range(self.num_residual_layers):
            x = self.stacks[i](x)
        return F.relu(x)
    
# TODO: `modified` Define the LOCAL MAP encoder

# Use ConvLSTM as the default feature extractor, as is common in
# pedestrian trajectory prediction tasks.
class Encoder(nn.Module):
    """Convolutional encoder: two stride-2 downsampling stages (1/4 spatial
    resolution overall) followed by a residual stack.

    Args:
        in_channels: channels of the input feature map.
        num_hiddens: output channel width.
        num_residual_layers: number of blocks in the residual stack.
        num_residual_hiddens: intermediate width inside each residual block.
    """

    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super(Encoder, self).__init__()
        half_hiddens = num_hiddens // 2
        # Stage 1: halve spatial size, expand channels to num_hiddens // 2.
        self._conv_1 = nn.Sequential(
            nn.Conv2d(in_channels, half_hiddens, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(half_hiddens),
            nn.ReLU(True),
        )
        # Stage 2: halve spatial size again, expand to num_hiddens.
        # No trailing ReLU — the residual stack supplies its own activations.
        self._conv_2 = nn.Sequential(
            nn.Conv2d(half_hiddens, num_hiddens, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_hiddens),
        )
        self._residual_stack = ResidualStack(in_channels=num_hiddens,
                                             num_hiddens=num_hiddens,
                                             num_residual_layers=num_residual_layers,
                                             num_residual_hiddens=num_residual_hiddens)

    def forward(self, feat):
        """Encode ``feat`` into a feature map at 1/4 the spatial resolution."""
        downsampled = self._conv_2(self._conv_1(feat))
        return self._residual_stack(downsampled)


class EncoderConvLSTM(nn.Module):
    """Encoder wrapping a multi-layer ConvLSTM over a batch-first sequence.

    Args:
        in_channels: channels of each frame fed to the ConvLSTM.
        num_hiddens: hidden-state channel width.
        kernel_size: convolution kernel of each ConvLSTM cell.
        bias: whether the ConvLSTM cells use bias terms.
        num_layer: number of stacked ConvLSTM layers.
    """

    def __init__(self, in_channels, num_hiddens, kernel_size = (3, 3), bias = True, num_layer = 3):
        super(EncoderConvLSTM, self).__init__()
        # Keep the configuration around for introspection.
        self.in_channels = in_channels
        self.num_hiddens = num_hiddens
        self.kernel_size = kernel_size
        self.bias = bias
        self.num_layer = num_layer

        # The actual recurrent feature extractor.
        self._convlstm = ConvLSTM(input_dim=in_channels,
                                  hidden_dim=num_hiddens,
                                  kernel_size=kernel_size,
                                  num_layers=num_layer,
                                  batch_first=True,
                                  bias=bias)

    def forward(self, input_data):
        """Run the input sequence through the ConvLSTM.

        Returns the pair produced by ConvLSTM: (layer outputs, hidden states).
        """
        # x = input_data.reshape(-1, SEQ_LEN, 1, IMG_SIZE, IMG_SIZE)
        return self._convlstm(input_data)


class DecoderConvLSTM(nn.Module):
    """Decoder wrapping a ConvLSTM whose input and hidden widths are equal.

    Args:
        hidden_size: channel width of both input and hidden state.
        kernel_size: convolution kernel of each ConvLSTM cell.
        bias: whether the ConvLSTM cells use bias terms.
        num_layer: number of stacked ConvLSTM layers.
    """

    def __init__(self, hidden_size, kernel_size = (3, 3), bias = True, num_layer = 1):
        super(DecoderConvLSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.kernel_size = kernel_size

        # Same argument order as the positional call in the original:
        # (input_dim, hidden_dim, kernel_size, num_layers, batch_first, bias).
        self._convlstm = ConvLSTM(input_dim=hidden_size,
                                  hidden_dim=hidden_size,
                                  kernel_size=kernel_size,
                                  num_layers=num_layer,
                                  batch_first=True,
                                  bias=bias)

    def forward(self, input_data):
        """Run the input sequence through the ConvLSTM.

        Returns the pair produced by ConvLSTM: (layer outputs, hidden states).
        """
        return self._convlstm(input_data)


class Classifier(nn.Module):
    """Two-layer tanh MLP with a linear head, producing a softmax
    distribution over ``n_clusters`` classes.

    Args:
        in_features: dimensionality of the input vectors.
        n_clusters: number of output classes.
        mid_size: width of the two hidden layers.
    """

    def __init__(self, in_features, n_clusters, mid_size=128):
        super(Classifier, self).__init__()
        self.classifier = nn.Sequential(
            nn.Linear(in_features, mid_size),
            nn.Tanh(),
            nn.Linear(mid_size, mid_size),
            nn.Tanh()
        )
        # Kept as a separate attribute (not folded into the Sequential) so it
        # can be accessed or swapped independently of the trunk.
        self.top_layer = nn.Linear(mid_size, n_clusters)

    def forward(self, data):
        """Return per-row class probabilities (rows sum to 1)."""
        hidden = self.classifier(data)
        logits = self.top_layer(hidden)
        return torch.softmax(logits, dim=1)
    

class Decoder(nn.Module):
    """
        Connect a convtrans block to the back of the convlstm net:
        a residual stack, then two x2 transposed-conv upsampling stages,
        ending in a 3x3 conv projection squashed to (0, 1) by a sigmoid.
    """

    def __init__(self, out_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super(Decoder, self).__init__()

        half_hiddens = num_hiddens // 2

        self._residual_stack = ResidualStack(in_channels=num_hiddens,
                                             num_hiddens=num_hiddens,
                                             num_residual_layers=num_residual_layers,
                                             num_residual_hiddens=num_residual_hiddens)

        # First upsampling stage: x2 spatial, num_hiddens -> num_hiddens // 2.
        self._conv_trans_2 = nn.Sequential(
            nn.ReLU(True),
            nn.ConvTranspose2d(num_hiddens, half_hiddens,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(half_hiddens),
            nn.ReLU(True),
        )

        # Second upsampling stage plus output head: x2 spatial, then project
        # to out_channels with a sigmoid activation.
        self._conv_trans_1 = nn.Sequential(
            nn.ConvTranspose2d(half_hiddens, half_hiddens,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(half_hiddens),
            nn.ReLU(True),
            nn.Conv2d(half_hiddens, out_channels,
                      kernel_size=3, stride=1, padding=1),
            nn.Sigmoid(),
        )

    def forward(self, inputs):
        """Decode ``inputs`` into a sigmoid-activated map at 4x the spatial size."""
        upsampled = self._conv_trans_2(self._residual_stack(inputs))
        return self._conv_trans_1(upsampled)


class Synthesizer(nn.Module):
    """Fuse an observation encoding with a stored modality row to synthesize
    a predicted-future encoding.

    Each row of ``all_modalities`` is treated as the concatenation of an
    observation part (first ``obs_hidden_size`` features) and a prediction
    part (the remaining features).
    """

    def __init__(self, obs_hidden_size, pred_hidden_size):
        super(Synthesizer, self).__init__()
        self.obs_hidden_size = obs_hidden_size
        self.pred_hidden_size = pred_hidden_size

        self.fc1 = nn.Linear(self.obs_hidden_size, self.obs_hidden_size)
        self.fc2 = nn.Linear(self.obs_hidden_size + self.pred_hidden_size, self.pred_hidden_size)

    def forward(self, obs_encoded, labels, all_modalities):
        assert labels is not None and all_modalities is not None
        # Select the modality rows once and split them into the two parts.
        selected = all_modalities[labels]
        obs_part = selected[:, :self.obs_hidden_size]
        pred_part = selected[:, self.obs_hidden_size:]
        # Sigmoid-gate the residual between the stored and actual observation
        # encodings, then fuse with the stored prediction part.
        gate = torch.sigmoid(self.fc1(obs_part - obs_encoded))
        fused = torch.cat((gate, pred_part), dim=1)
        return self.fc2(fused)
    

class PCCSNet(nn.Module):
    """Top-level network: classify an observation encoding into modality
    clusters, synthesize a future encoding for the top-k modalities, and
    decode it into a prediction.

    Args:
        obs_len: number of observed timesteps.
        pre_len: number of predicted timesteps.
        n_cluster: number of modality clusters for the classifier.
        obs_hidden_size: width of the observation encoding.
        pred_hidden_size: width of the prediction encoding.
        num_layer: number of ConvLSTM layers in each encoder.
    """

    def __init__(self, obs_len=8, pre_len=12, n_cluster=200, obs_hidden_size=48, pred_hidden_size=48, num_layer=3):

        super(PCCSNet, self).__init__()

        self.obs_len = obs_len
        self.pre_len = pre_len
        self.obs_hidden_size = obs_hidden_size
        self.pred_hidden_size = pred_hidden_size
        self.num_layer = num_layer

        # BUG FIX: num_layer was previously passed positionally as the third
        # argument of EncoderConvLSTM, which is kernel_size — an int where a
        # (h, w) tuple is expected. Pass it by keyword so kernel_size keeps
        # its (3, 3) default and the layer count lands in num_layer.
        self.encoder_obs = EncoderConvLSTM(2, self.obs_hidden_size // 2, num_layer=self.num_layer)
        self.encoder_pre = EncoderConvLSTM(2, self.pred_hidden_size // 2, num_layer=self.num_layer)
        self.synthesizer = Synthesizer(self.obs_hidden_size, self.pred_hidden_size)
        self.classifier = Classifier(self.obs_hidden_size, n_cluster)
        # NOTE(review): Decoder's signature is (out_channels, num_hiddens,
        # num_residual_layers, num_residual_hiddens); mapping pre_len and the
        # half hidden sizes / num_layer onto those slots looks suspicious —
        # confirm the intended argument order against Decoder.
        self.decoder = Decoder(self.pre_len, self.obs_hidden_size // 2, self.pred_hidden_size // 2, self.num_layer)

    def forward(self, obs_encoded, k=None, batch_labels=None, all_modalities=None, obs_feature_len=None):
        """Predict k modalities for a single observation encoding.

        Args:
            obs_encoded: observation encoding; only batch size 1 is supported.
            k: number of top-probability modalities to synthesize (required).
            batch_labels, obs_feature_len: accepted but unused here —
                presumably consumed by other call paths; kept for interface
                compatibility.
            all_modalities: table of stored modality encodings indexed by
                cluster id.
        """
        bs = len(obs_encoded)
        assert k is not None

        class_probs = self.classifier(obs_encoded)
        # Cluster indices sorted by descending probability for sample 0.
        prob_list = torch.argsort(class_probs, descending=True)[0]
        if bs != 1:
            raise NotImplementedError
        assert k <= len(prob_list)

        # Duplicate the single observation encoding k times, one per modality.
        expanded_obs_encoding = obs_encoded.repeat(k, 1)
        predicted_future_encoding = self.synthesizer(expanded_obs_encoding, labels=prob_list[:k],
                                                     all_modalities=all_modalities)
        # NOTE(review): Decoder.forward expects a single tensor; passing a
        # list here looks inconsistent with the Decoder defined above —
        # confirm which decoder implementation this is meant to pair with.
        output = self.decoder([expanded_obs_encoding, predicted_future_encoding])

        return output

    def gen_encoding(self, trj_obs, trj_pred, for_training=True):
        """Encode observed (and optionally future) trajectories without grad.

        Returns (obs_encoding, pre_encoding); pre_encoding is None when
        for_training is False.
        """
        bs, trj_len, _ = trj_obs.shape

        with torch.no_grad():
            # Time-major layout for the encoder.
            trj_obs = trj_obs.transpose(0, 1)
            # NOTE(review): EncoderConvLSTM defines no initHidden method and
            # its forward takes a single argument — this call path would raise
            # at runtime as written; confirm against the intended encoder API.
            encoder_obs_hidden = self.encoder_obs.initHidden(bs)
            obs_encoded, _ = self.encoder_obs(trj_obs, encoder_obs_hidden)
            obs_encoding = obs_encoded[-1]

            if for_training:
                trj_pred = trj_pred.transpose(0, 1)
                encoder_pre_hidden = self.encoder_pre.initHidden(bs)
                pre_encoded, _ = self.encoder_pre(trj_pred, encoder_pre_hidden)
                pre_encoding = pre_encoded[-1]
            else:
                pre_encoding = None

        return obs_encoding, pre_encoding

    def load_encoders(self, save_dir):
        """Load both encoders' weights from ``save_dir``."""
        checkpoint = torch.load(os.path.join(save_dir, "Obs_Encoder.pth"))
        self.encoder_obs.load_state_dict(checkpoint)
        checkpoint = torch.load(os.path.join(save_dir, "Pred_Encoder.pth"))
        self.encoder_pre.load_state_dict(checkpoint)

    def load_decoder(self, save_dir):
        """Load the decoder's sub-components from ``save_dir``.

        NOTE(review): the Decoder defined above has no ``decoder`` or ``fc``
        attributes — this matches a different decoder implementation; confirm.
        """
        checkpoint = torch.load(os.path.join(save_dir, "decoder.pth"))
        self.decoder.decoder.load_state_dict(checkpoint)
        checkpoint = torch.load(os.path.join(save_dir, "fc.pth"))
        self.decoder.fc.load_state_dict(checkpoint)

    def load_classifier(self, save_dir):
        """Load the classifier's weights from ``save_dir``."""
        checkpoint = torch.load(os.path.join(save_dir, "classifier.pth"))
        self.classifier.load_state_dict(checkpoint)

    def load_synthesizer(self, save_dir):
        """Load the synthesizer's two linear layers from ``save_dir``."""
        checkpoint = torch.load(os.path.join(save_dir, "synthesizer_fc1.pth"))
        self.synthesizer.fc1.load_state_dict(checkpoint)
        checkpoint = torch.load(os.path.join(save_dir, "synthesizer_fc2.pth"))
        self.synthesizer.fc2.load_state_dict(checkpoint)

    def load_models(self, save_dir):
        """Load every sub-network's weights from ``save_dir``."""
        self.load_encoders(save_dir)
        self.load_decoder(save_dir)
        self.load_classifier(save_dir)
        self.load_synthesizer(save_dir)
