import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv

from model.prediction import Linear


class DynamicGraphConvModel(nn.Module):
    """Incremental multi-hop GCN over a graph that changes between calls.

    The model holds a reference to the current graph (``self.graph_tmp``)
    plus per-node mask tensors ("hash embeddings") that mark which nodes are
    directly affected by the latest edge updates (1-hop) and which are only
    reachable from those (2-hop / 3-hop).  ``forward`` propagates embedding
    deltas only through the affected neighbourhoods and returns a scalar
    prediction per requested node.

    Args:
        device: device on which the per-node state tensors are allocated.
        hidden_size: width of the node embeddings.
        output_dim: stored but unused in this class; presumably consumed by
            ``model.prediction.Linear`` -- TODO confirm.
        num_gcn_layers: number of stacked ``GCNConv`` layers per hop stage.
        param: optional config dict; only ``param["dropout"]`` is read.
    """

    def __init__(self, device: torch.device, hidden_size, output_dim, num_gcn_layers=1, param=None):
        super().__init__()
        self.hidden_size = hidden_size
        self.output_dim = output_dim
        self.device = device
        self.num_gcn_layers = num_gcn_layers
        # Separate GCN stacks for the 1-hop, 2-hop and 3-hop stages.
        self.gcn_layers = nn.ModuleList([GCNConv(hidden_size, hidden_size) for _ in range(num_gcn_layers)])
        self.gcn_2hop_layers = nn.ModuleList([GCNConv(hidden_size, hidden_size) for _ in range(num_gcn_layers)])
        self.gcn_3hop_layers = nn.ModuleList([GCNConv(hidden_size, hidden_size) for _ in range(num_gcn_layers)])
        self.predictor = Linear(hidden_size)
        self.init = GCNConv(hidden_size, hidden_size)
        self.graph_tmp = None
        # Fix: the original indexed param["dropout"] unconditionally, so the
        # declared default param=None crashed with a TypeError.  Fall back to
        # no dropout when no config is supplied; callers that pass a dict see
        # identical behaviour.
        dropout_p = 0.0 if param is None else param["dropout"]
        self.dropout = nn.Dropout(p=dropout_p)
        # Raw node features are 2-dimensional (projector is applied directly
        # to graph_tmp.x in forward).
        self.projector = nn.Linear(2, hidden_size)

    def init_graph(self, graph_t0):
        """Store the initial graph and allocate the per-node mask state.

        Args:
            graph_t0: graph object with a node-feature tensor ``x`` of shape
                ``(num_nodes, 2)`` (presumably a torch_geometric ``Data`` --
                TODO confirm against the caller).
        """
        self.graph_tmp = graph_t0
        num_nodes = graph_t0.x.size(0)
        # 1-hop masks start at zero (no node touched yet); 2-/3-hop masks
        # start at one so that multiplying by the in-degree keeps any node
        # with at least one incoming edge marked.
        self.one_order_hash = torch.zeros(num_nodes, self.hidden_size).to(self.device)
        self.one_order_hash_embedding = torch.zeros(num_nodes, self.hidden_size).to(self.device)
        self.two_order_hash = torch.ones(num_nodes, self.hidden_size).to(self.device)
        self.two_order_hash_embedding = torch.ones(num_nodes, self.hidden_size).to(self.device)
        self.three_order_hash = torch.ones(num_nodes, self.hidden_size).to(self.device)
        self.three_order_hash_embedding = torch.ones(num_nodes, self.hidden_size).to(self.device)

    def forward(self, delta_adj, adj_now, target_idx):
        """Propagate the latest edge updates and score the target nodes.

        Args:
            delta_adj: ``(2, E_delta)`` edge-index tensor of newly added
                edges; row 1 holds the destination nodes.
            adj_now: ``(2, E)`` edge-index tensor of the full current graph.
            target_idx: indices of nodes to score.

        Returns:
            1-D tensor of length ``num_nodes``; zero everywhere except at
            ``target_idx``, where it holds the predictor's output.
        """
        feature_h0 = F.relu(self.projector(self.graph_tmp.x))

        # Mark the destinations of the new edges in the 1-hop mask.
        # NOTE(review): this mask is never reset, so marks accumulate across
        # forward calls -- confirm that is intended.
        indices_1_order = delta_adj[1, :]
        self.one_order_hash_embedding[indices_1_order] = torch.ones(self.hidden_size, device=self.device)
        feature_h1 = self.gcn_forward(delta_adj, feature_h0, self.one_order_hash_embedding)

        # 2-hop mask: nodes with at least one incoming edge that are NOT
        # already 1-hop targets.  Fix: pass minlength so bincount always
        # yields one entry per node; without it the result is shorter than
        # num_nodes whenever the highest-index node has no incoming edge,
        # which breaks the element-wise multiply below.
        num_nodes = self.graph_tmp.x.size(0)
        in_degree = torch.bincount(adj_now[1], minlength=num_nodes).view(-1, 1)
        self.two_order_hash_embedding = in_degree * self.two_order_hash
        self.two_order_hash_embedding = ((self.two_order_hash_embedding - self.one_order_hash_embedding) > 0).float()
        feature_h2 = self.gcn_2hop_forward(adj_now, feature_h1, feature_h0, self.two_order_hash_embedding)

        # A third refinement stage (gcn_3hop_forward) exists but is currently
        # disabled; forward returns after the 2-hop stage.

        pred = torch.zeros(num_nodes).to(self.device)
        if len(target_idx) > 0:
            emb = feature_h2[target_idx]
            # Assumes self.predictor returns a 1-D tensor of per-node scores
            # -- TODO confirm against model.prediction.Linear.
            pred[target_idx] = self.predictor(emb)
        return pred

    def gcn_forward(self, adj, feature, one_order_hash_embedding):
        """1-hop stage: convolve over the delta edges only.

        Args:
            adj: ``(2, E_delta)`` edge index of the newly added edges.
            feature: current node embeddings, ``(num_nodes, hidden_size)``.
            one_order_hash_embedding: 0/1 mask of 1-hop-affected nodes.

        Returns:
            Embeddings where masked nodes take the convolved value and all
            other nodes keep their input feature.
        """
        H0 = feature
        for gcn_layer in self.gcn_layers:
            H1 = F.relu(gcn_layer(H0, adj))
            H0 = H1
        # Nodes without neighbours in the delta graph come out of the
        # convolution as zeros, so restore their original features there.
        final_1order_embedding = one_order_hash_embedding * H0 + (1 - one_order_hash_embedding) * feature
        return final_1order_embedding

    def gcn_2hop_forward(self, adj, then_embedding_x, original_embedding_x, two_hash_embedding):
        """2-hop stage: propagate the 1-hop embedding delta over the full graph.

        Args:
            adj: ``(2, E)`` edge index of the whole graph at this time step.
            then_embedding_x: embeddings after the 1-hop stage.
            original_embedding_x: embeddings before the 1-hop stage.
            two_hash_embedding: 0/1 mask of 2-hop-affected nodes.

        Returns:
            Embeddings where masked nodes take the propagated delta and all
            other nodes keep the 1-hop result.
        """
        # Only the CHANGE in representation is propagated to neighbours.
        delta_H = then_embedding_x - original_embedding_x
        H0 = delta_H
        for gcn_layer in self.gcn_2hop_layers:
            H1 = F.relu(gcn_layer(H0, adj))
            H0 = H1
        final_2order_embedding = (1 - two_hash_embedding) * then_embedding_x + two_hash_embedding * H0
        return final_2order_embedding

    def gcn_3hop_forward(self, adj, then_embedding_x, original_embedding_x, three_order_hash_embedding):
        """3-hop stage, mirroring gcn_2hop_forward; currently unused by forward."""
        delta_H = then_embedding_x - original_embedding_x
        H0 = delta_H
        for gcn_layer in self.gcn_3hop_layers:
            H1 = F.relu(gcn_layer(H0, adj))
            H0 = H1
        final_3order_embedding = three_order_hash_embedding * H0 + (1 - three_order_hash_embedding) * then_embedding_x
        return final_3order_embedding
