import torch
from torch.nn import Embedding
from torch.utils.data import DataLoader
from torch_geometric.nn import GCNConv
from torch_sparse import SparseTensor
from torch_geometric.utils.num_nodes import maybe_num_nodes

from Model.Module.GCN import GCN

# try:
#     import torch_cluster  # noqa
#
#     random_walk = torch.ops.torch_cluster.random_walk
# except ImportError:
#     random_walk = None

EPS = 1e-15
from Model.ge.walker import RandomWalker
from torch_geometric.utils import to_networkx

class Gcnconv1(torch.nn.Module):
    r"""Node2vec-style random-walk training on top of a single GCN layer.

    Positive context windows are cut from (p, q)-biased random walks over the
    undirected version of ``data``'s graph (via ``RandomWalker``); negative
    windows pair each start node with uniformly random node ids.  ``loss``
    implements the usual skip-gram negative-sampling objective on externally
    supplied node embeddings.
    """

    # Numerical-stability epsilon added inside the logs of the skip-gram
    # loss so log(0) can never occur.  Same value as the old module-level EPS.
    EPS = 1e-15

    def __init__(
            self,
            data,
            num_feature,
            **kwargs):
        """Build the sampler state and the GCN embedding layer.

        Args:
            data: a torch_geometric ``Data`` object; only ``edge_index`` is
                read here (plus whatever ``to_networkx`` needs).
            num_feature: input feature dimensionality of the GCNConv layer.
            **kwargs: must provide 'device', 'walk_length', 'context_size',
                'walks_per_node', 'p', 'q', 'num_negative_samples' and
                'model_layer' (a sequence whose [0] is the embedding size).
        """
        super(Gcnconv1, self).__init__()

        N = maybe_num_nodes(data.edge_index, None)
        row, col = data.edge_index
        self.data = data
        # RandomWalker expects an undirected networkx graph.
        self.diggraph = to_networkx(data)
        self.graph = self.diggraph.to_undirected()

        self.adj = SparseTensor(row=row, col=col, sparse_sizes=(N, N))
        self.adj = self.adj.to(kwargs['device'])

        assert kwargs['walk_length'] >= kwargs['context_size'], \
            'walk_length must be >= context_size'
        self.embedding_dim = kwargs['model_layer'][0]
        self.walk_length = kwargs['walk_length']
        self.context_size = kwargs['context_size']
        self.walks_per_node = kwargs['walks_per_node']
        self.p = kwargs['p']
        self.q = kwargs['q']
        self.num_negative_samples = int(kwargs['num_negative_samples'])
        self.embedding = GCNConv(num_feature, kwargs['model_layer'][0])
        # Cached positive random walks; simulated lazily in ``pos_rw``.
        self.rw = None

    def reset_parameters(self):
        """Re-initialize the GCN layer's weights."""
        self.embedding.reset_parameters()

    def forward(self, x, edge_index, edge_attr):
        """Return GCN embeddings for node features ``x``.

        ``edge_attr`` is forwarded as the GCNConv edge weights.
        """
        return self.embedding(x, edge_index, edge_attr)

    def loader(self, **kwargs):
        """DataLoader over all node ids that yields (pos, neg) walk batches."""
        return DataLoader(range(self.adj.sparse_size(0)),
                          collate_fn=self.sample, **kwargs)

    def pos_rw(self):
        """Simulate (once) and cache the biased random walks used as positives."""
        if self.rw is None:
            # BUGFIX: p and q were previously hard-coded to 1, silently
            # ignoring the return/in-out parameters passed via kwargs.
            self.walker = RandomWalker(
                self.graph, p=self.p, q=self.q, use_rejection_sampling=0)
            print("Preprocess transition probs...")
            self.walker.preprocess_transition_probs()
            self.rw = self.walker.simulate_walks(
                num_walks=self.walks_per_node,
                walk_length=self.walk_length,
                workers=1, verbose=1)
        return self.rw

    def pos_sample(self, batch):
        """Return positive context windows for the start nodes in ``batch``."""
        self.sentences = self.pos_rw()
        # One-time set build gives O(1) membership per walk instead of an
        # element-wise tensor comparison per walk.  (The old code repeated
        # ``batch`` walks_per_node times first, which cannot change the
        # membership result, so the repeat is dropped.)
        wanted = set(batch.tolist())
        rw = [walk for walk in self.sentences if walk[0] in wanted]
        # NOTE(review): assumes every simulated walk has exactly
        # ``walk_length`` nodes; torch.tensor would raise on ragged walks
        # (e.g. walks cut short at dead-end nodes) — confirm with RandomWalker.
        rw = torch.tensor(rw)
        walks = []
        # Number of sliding context windows in a walk of walk_length nodes.
        num_walks_per_rw = 1 + self.walk_length - self.context_size
        for j in range(num_walks_per_rw):
            walks.append(rw[:, j:j + self.context_size])
        return torch.cat(walks, dim=0)

    def neg_sample(self, batch):
        """Return negative windows: each start node + uniform random node ids."""
        batch = batch.repeat(self.walks_per_node * self.num_negative_samples)

        rw = torch.randint(self.adj.sparse_size(0),
                           (batch.size(0), self.walk_length))
        # Prepend the start node, giving rows of walk_length + 1 ids — hence
        # the extra "+ 1" in the window count below (vs. pos_sample).
        rw = torch.cat([batch.view(-1, 1), rw], dim=-1)

        walks = []
        num_walks_per_rw = 1 + self.walk_length + 1 - self.context_size
        for j in range(num_walks_per_rw):
            walks.append(rw[:, j:j + self.context_size])
        return torch.cat(walks, dim=0)

    def sample(self, batch):
        """Collate fn: map a batch of node ids to (pos_walks, neg_walks)."""
        if not isinstance(batch, torch.Tensor):
            batch = torch.tensor(batch)
        return self.pos_sample(batch), self.neg_sample(batch)

    def loss(self, embedding, pos_rw, neg_rw):
        r"""Skip-gram negative-sampling loss for the given walk windows.

        Args:
            embedding: (num_nodes, embedding_dim) node embedding matrix.
            pos_rw: (P, context_size) positive windows; column 0 is the start.
            neg_rw: (Q, context_size) negative windows; column 0 is the start.

        Returns:
            Scalar tensor: mean positive loss + mean negative loss.
        """
        # Positive loss: pull start embedding toward its context embeddings.
        start, rest = pos_rw[:, 0], pos_rw[:, 1:].contiguous()

        h_start = embedding[start].view(pos_rw.size(0), 1, self.embedding_dim)
        h_rest = embedding[rest.view(-1)].view(pos_rw.size(0), -1,
                                               self.embedding_dim)

        out = (h_start * h_rest).sum(dim=-1).view(-1)
        pos_loss = -torch.log(torch.sigmoid(out) + self.EPS).mean()

        # Negative loss: push start embedding away from random node embeddings.
        start, rest = neg_rw[:, 0], neg_rw[:, 1:].contiguous()

        h_start = embedding[start].view(neg_rw.size(0), 1,
                                        self.embedding_dim)
        h_rest = embedding[rest.view(-1)].view(neg_rw.size(0), -1,
                                               self.embedding_dim)

        out = (h_start * h_rest).sum(dim=-1).view(-1)
        neg_loss = -torch.log(1 - torch.sigmoid(out) + self.EPS).mean()
        return pos_loss + neg_loss

    def __repr__(self):
        # NOTE(review): GCNConv in recent torch_geometric keeps its weight
        # under ``self.lin.weight``; ``embedding.weight`` may raise there —
        # confirm against the installed PyG version.
        return '{}({}, {})'.format(self.__class__.__name__,
                                   self.embedding.weight.size(0),
                                   self.embedding.weight.size(1))
