from torch_geometric.data import Data
from torch_geometric.nn import MessagePassing
import networkx as nx
import numpy as np
import torch
import torch.nn.functional as F


def to_edge_index(G):
    """Convert a networkx graph *G* to a PyG-style ``edge_index`` tensor.

    Returns a ``(2, num_edges)`` long tensor of (row, col) endpoint pairs
    taken from the graph's sparse adjacency matrix in COO form.
    """
    coo = nx.to_scipy_sparse_array(G).tocoo()
    rows = torch.from_numpy(coo.row.astype(np.int64)).to(torch.long)
    cols = torch.from_numpy(coo.col.astype(np.int64)).to(torch.long)
    return torch.stack((rows, cols), dim=0)


def init_state(G):
    """Draw a random binary initial state for every node of *G*.

    A random Bernoulli parameter is drawn once, then each node independently
    gets state 0 or 1. Returns a ``(num_nodes, 1)`` float tensor of 0.0/1.0.
    """
    probs = np.random.rand(2)
    probs = probs / probs.sum()
    rng = np.random.default_rng()
    # multinomial(1, ...) yields one-hot rows; the last column is the 0/1 flag
    draws = rng.multinomial(1, probs, G.number_of_nodes())
    state = draws[..., -1]
    return torch.tensor(state).unsqueeze(-1).to(torch.float)


def independent(l, p):
    """Probability of at least one success in *l* independent trials of prob *p*.

    Complement of all-*l*-trials failing; works elementwise on arrays/tensors.
    """
    all_fail = (1 - p) ** l
    return 1 - all_fail


def recovery(q):
    """Recovery probability: simply the constant rate *q* (state-independent)."""
    return q


class num_of_l(MessagePassing):
    """Sum-aggregating message pass over the graph.

    With a 0/1 infection indicator in ``data.x``, the default identity
    message plus ``add`` aggregation yields, per node, the number of
    infected neighbors.
    """

    def __init__(self):
        super().__init__(aggr="add")

    def forward(self, data):
        edge_index, x = data.edge_index, data.x
        return self.propagate(edge_index, x=x)


def generate_data(b, g, G, num_chains=500, steps=2):
    """Generate supervised (state, next-state) pairs from SIS dynamics on *G*.

    Args:
        b: infection probability per infected neighbor (beta).
        g: recovery probability (gamma).
        G: networkx graph defining the topology.
        num_chains: number of independent restarts from a random initial state.
        steps: number of transitions recorded per chain.

    Returns:
        List of ``Data`` objects where ``x`` is the current node state
        (num_nodes, 1) and ``y`` is the one-hot encoding of the sampled
        next state.
    """
    edge_index = to_edge_index(G)
    update = dynamics(b, g)

    dataset = []
    for _ in range(num_chains):
        today = Data(x=init_state(G), edge_index=edge_index)
        for _ in range(steps):
            ltp = update.cal_ltp(today)
            out = update.sample(ltp)
            # label the *current* snapshot with the sampled next state
            today.y = F.one_hot(out.view(-1).to(torch.long), 2)
            dataset.append(today)
            today = Data(x=out, edge_index=edge_index)

    return dataset


def simulate(b, g, G, burn_in=300, window=400, norm=2000):
    """Run the exact SIS dynamics on *G* and return the mean infected fraction.

    The chain is advanced *burn_in* steps to approach the stationary regime,
    then the infected count is averaged over *window* further steps and
    divided by *norm*.

    Args:
        b: infection probability per infected neighbor (beta).
        g: recovery probability (gamma).
        G: networkx graph.
        burn_in: steps discarded before measuring.
        window: steps averaged over.
        norm: divisor for the average count; defaults to 2000 —
            presumably the node count of the graph used originally.
            NOTE(review): pass ``G.number_of_nodes()`` for other graphs.

    Returns:
        Scalar tensor: average infected count per step, divided by *norm*.
    """
    edge_index = to_edge_index(G)
    update = dynamics(b, g)
    today = Data(x=init_state(G), edge_index=edge_index)

    # burn-in: advance without recording
    for _ in range(burn_in):
        out = update.sample(update.cal_ltp(today))
        today = Data(x=out, edge_index=edge_index)

    total = 0  # avoid shadowing builtin `sum`
    for _ in range(window):
        out = update.sample(update.cal_ltp(today))
        today = Data(x=out, edge_index=edge_index)
        total += out.view(-1).sum()

    return (total / window) / norm


def recur(model, G, burn_in=300, window=400, norm=2000):
    """Run the *learned* dynamics on *G* and return the mean infected fraction.

    Same protocol as ``simulate`` but the per-node transition probabilities
    come from a trained model instead of the analytic ``cal_ltp``.

    Args:
        model: callable mapping a ``Data`` object to per-node transition
            probabilities compatible with ``dynamics.sample``.
        G: networkx graph.
        burn_in: steps discarded before measuring.
        window: steps averaged over.
        norm: divisor for the average count; defaults to 2000 —
            presumably the node count of the graph used originally.
            NOTE(review): pass ``G.number_of_nodes()`` for other graphs.

    Returns:
        Scalar tensor: average infected count per step, divided by *norm*.
    """
    model.to("cpu")
    edge_index = to_edge_index(G)
    today = Data(x=init_state(G), edge_index=edge_index)
    update = dynamics()  # only its `sample` is used here

    # burn-in: advance without recording
    for _ in range(burn_in):
        out = update.sample(model(today))
        today = Data(x=out, edge_index=edge_index)

    total = 0  # avoid shadowing builtin `sum`
    for _ in range(window):
        out = update.sample(model(today))
        today = Data(x=out, edge_index=edge_index)
        total += out.view(-1).sum()

    return (total / window) / norm


class dynamics:
    """Exact SIS local transition probabilities.

    ``beta`` is the per-neighbor infection probability, ``gamma`` the
    recovery probability. ``cal_ltp`` builds the per-node transition table;
    ``sample`` draws the next state from it.
    """

    def __init__(self, b=0.04, g=0.08):
        self.beta = b
        self.gamma = g
        self.i_n = num_of_l()  # counts infected neighbors per node

    def cal_ltp(self, data):
        """Return an ``(num_nodes, 2)`` array of [P(next=0), P(next=1)] rows."""
        infected_neighbors = self.i_n(data)
        infect_prob = independent(infected_neighbors, self.beta)
        recover_prob = recovery(self.gamma)

        n = data.num_nodes
        state = np.array(data.x.reshape(n))
        infect_prob = np.array(infect_prob.reshape(n))

        table = np.zeros((n, 2))
        susceptible = state == 0
        infected = state == 1
        # susceptible: infected with prob p, stays susceptible with 1-p
        table[susceptible, 0] = 1 - infect_prob[susceptible]
        table[susceptible, 1] = infect_prob[susceptible]
        # infected: recovers with prob q, stays infected with 1-q
        table[infected, 0] = recover_prob
        table[infected, 1] = 1 - recover_prob

        return table

    def sample(self, ltp):
        """Draw one categorical next-state per node; returns (N, 1) float tensor."""
        probs = torch.tensor(ltp) if isinstance(ltp, np.ndarray) else ltp
        draw = torch.distributions.Categorical(probs).sample()
        return draw.view(-1, 1).to(torch.float)
