import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import SAGPooling, GCNConv, GATConv, global_mean_pool, global_max_pool
from torch_geometric.utils import to_dense_batch, scatter

from util import to_sparse_batch


class AggrModel(nn.Module):
    """Aggregate a window of embeddings into one environment feature vector.

    An LSTM consumes the window dimension; the final hidden states of all
    layers are concatenated per sequence, yielding a vector of size
    ``aggr_hidden * aggr_layer`` (asserted to equal ``env_feats``).
    """

    def __init__(self, env_feats, embed_size, aggr_hidden, aggr_layer=1):
        super().__init__()

        self.env_feats = env_feats
        self.embed_size = embed_size

        # TODO: dropout
        self.lstm = nn.LSTM(embed_size, aggr_hidden, aggr_layer, batch_first=True)

    def forward(self, x):
        # x: (~B*L, w, H_e)  ->  (~B*L, H_en)
        num_seqs = x.shape[0]

        _, (final_hidden, _) = self.lstm(x)     # final_hidden: (layers, ~BL, h)
        # Put the sequence axis first, then flatten the per-layer hidden states.
        out = final_hidden.transpose(0, 1).contiguous().view(num_seqs, -1)

        assert self.env_feats == out.shape[-1]
        return out


class TemporalModel(nn.Module):
    """Per-node temporal encoder: an RNN run along each node's time series.

    ``rnn`` selects the cell type ('lstm', 'gru', or 'rnn'); any other value
    raises ``ValueError``.
    """

    def __init__(self, in_feats, hidden, layers=1, rnn='lstm'):
        super().__init__()

        self.in_feats = in_feats
        self.hidden = hidden
        self.layers = layers
        self.rnn = rnn

        # Dispatch table instead of an if/elif chain.
        cell_types = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}
        try:
            cell = cell_types[rnn]
        except KeyError:
            raise ValueError(f"RNN type {rnn} for temporal model is not supported.")
        self.net = cell(self.in_feats, self.hidden, self.layers, batch_first=True)

    def forward(self, x, batch):
        """Map sparse node rows (~B*L, F_s) to temporal features (~B*L, H_te).

        ``batch`` assigns each row to its owning sequence.
        """
        # Pad ragged sequences to (B, max(L), F_s), run the RNN, then un-pad.
        dense, _ = to_dense_batch(x, batch, fill_value=0)
        seq_out, _ = self.net(dense)                 # (B, max(L), H_te)  # TODO: pack
        return to_sparse_batch(seq_out, batch)       # (~BL, H_te)


class STGNN(nn.Module):
    """Three-stage GCN + SAGPooling graph encoder with hierarchical readout.

    Each stage applies a GCNConv, prunes nodes with SAGPooling, and reads the
    pooled graph out via concatenated global max/mean pooling; the three stage
    readouts are summed into a single (B, 2 * hidden) graph embedding.
    """

    def __init__(self, in_feats, hidden, pooling_ratio=0.5):
        super(STGNN, self).__init__()

        self.num_features = in_feats
        self.nhid = hidden
        self.pooling_ratio = pooling_ratio

        # FIX: conv1 was previously constructed twice in a row; the first
        # instance was immediately overwritten, wasting an allocation and
        # RNG draws during parameter init.
        self.conv1 = GCNConv(self.num_features, self.nhid)
        self.pool1 = SAGPooling(self.nhid, ratio=self.pooling_ratio)   # TODO:GAT
        self.conv2 = GCNConv(self.nhid, self.nhid)
        self.pool2 = SAGPooling(self.nhid, ratio=self.pooling_ratio)
        self.conv3 = GCNConv(self.nhid, self.nhid)
        self.pool3 = SAGPooling(self.nhid, ratio=self.pooling_ratio)

    def forward(self, x, edge_index, batch):
        """Encode a node-feature batch into per-graph embeddings.

        Args:
            x: node features, (~B*L, H_te + H_me).
            edge_index: COO edge indices for the batched graph.
            batch: graph-assignment vector for each node.

        Returns:
            (B, 2 * hidden) tensor — sum of the three stage readouts.
        """
        readouts = []
        for conv, pool in ((self.conv1, self.pool1),
                           (self.conv2, self.pool2),
                           (self.conv3, self.pool3)):
            x = F.relu(conv(x, edge_index))
            # SAGPooling returns (x, edge_index, edge_attr, batch, perm, score);
            # edge_attr/perm/score are unused here.
            x, edge_index, _, batch, _, _ = pool(x, edge_index, None, batch)
            readouts.append(torch.cat([global_max_pool(x, batch),
                                       global_mean_pool(x, batch)], dim=1))

        return readouts[0] + readouts[1] + readouts[2]


class Detector(nn.Module):
    """Three-layer MLP head mapping a graph embedding to two-class logits."""

    def __init__(self, in_feats, dropout=0.5):
        super().__init__()

        self.nhid = in_feats
        self.dropout = dropout

        half = self.nhid // 2
        self.lin1 = torch.nn.Linear(self.nhid, half)
        self.lin2 = torch.nn.Linear(half, half)
        self.lin3 = torch.nn.Linear(half, 2)

    def forward(self, x):
        """Map (B, 2*H_st) embeddings to (B, 2) raw logits.

        No softmax is applied here; callers are expected to combine the
        logits with a loss such as cross-entropy.
        """
        hidden = F.dropout(F.relu(self.lin1(x)),
                           p=self.dropout, training=self.training)
        hidden = F.relu(self.lin2(hidden))
        return self.lin3(hidden)

