import utils as u
import torch
from torch.nn.parameter import Parameter
import torch.nn as nn
import math
import pdb

class REGCN(torch.nn.Module):
    """Relational EvolveGCN: stacked RGRCU layers run over multi-relation graph
    snapshot sequences and autoregressively predict future node embeddings."""

    def __init__(self, args, activation, device='cpu', skipfeats=False):
        super().__init__()
        feats = [args.feats_per_node,
                 args.layer_1_feats,
                 args.layer_2_feats]
        self.device = device
        self.skipfeats = skipfeats
        self.GRCU_layers = []
        self.num_relations = args.num_relations
        # how per-relation outputs are merged: 'sum' | 'mean' | 'attention'
        self.aggregation = args.aggregation if hasattr(args, 'aggregation') else 'sum'
        # per-relation attention logits, shared with every RGRCU layer
        self.relation_att = nn.Parameter(torch.Tensor(args.num_relations, 1))
        nn.init.xavier_uniform_(self.relation_att)
        self.custom_param_list = nn.ParameterList()
        # projects a layer-2 output back to input feature size so one
        # prediction step can be fed in as the next step's input
        self.linear1 = nn.Parameter(torch.Tensor(args.layer_2_feats, args.feats_per_node))
        nn.init.xavier_uniform_(self.linear1)
        # BUG FIX: parameters() is overridden to return custom_param_list only,
        # so relation_att and linear1 must be registered there explicitly or
        # the optimizer never updates them.
        self.custom_param_list.append(self.relation_att)
        self.custom_param_list.append(self.linear1)
        for i in range(1, len(feats)):
            GRCU_args = u.Namespace({
                'in_feats': feats[i - 1],
                'out_feats': feats[i],
                'activation': activation
            })
            grcu_i = RGRCU(GRCU_args, num_relations=self.num_relations,
                           aggregation=self.aggregation, relation_att=self.relation_att)
            self.GRCU_layers.append(grcu_i.to(self.device))
            self.custom_param_list.extend(list(self.GRCU_layers[-1].parameters()))

    def parameters(self):
        # override: expose only the explicitly collected parameters
        return self.custom_param_list

    def forward(self, A_lists, Nodes_lists, mask_lists, num_predict=5):
        """Roll the model forward ``num_predict`` steps.

        A_lists: per-relation adjacency-matrix sequences.
        Nodes_lists: per-relation node-feature sequences; only relation 0's
            sequence is used as the evolving input window.
        mask_lists: unused, kept for interface compatibility.
        Returns a list of ``num_predict`` node-embedding tensors.
        """
        out = []
        node_feats = Nodes_lists[0][-1]  # latest node features of the first relation
        Nodes_list = Nodes_lists[0]
        # NOTE(review): this aliases the caller's list and is mutated in place
        # below (pop/append) — confirm callers do not reuse Nodes_lists[0].
        Nodes_list_prev = Nodes_lists[0]
        # move the output-to-input projection onto the same device as the input
        out2inputweights = self.linear1.to(Nodes_list[-1].device)
        for _ in range(num_predict):
            for unit in self.GRCU_layers:
                Nodes_list = unit(A_lists, Nodes_list)
            out.append(Nodes_list[-1])  # prediction for this step
            if self.skipfeats:
                # BUG FIX: the original called torch.cat((out, node_feats), dim=1)
                # on the python list `out`, which raises TypeError; the intent
                # (mirroring EGCN) is to append skip features to the prediction.
                out[-1] = torch.cat((out[-1], node_feats), dim=1)
            # project layer_2_feats back down to feats_per_node
            next_input = Nodes_list[-1].matmul(out2inputweights)
            node_feats = next_input
            # slide the input window: drop the oldest step, append the prediction
            Nodes_list_prev.pop(0)
            Nodes_list_prev.append(next_input)
            Nodes_list = Nodes_list_prev
        return out

class RGRCU(torch.nn.Module):
    """Relational GRCU: keeps one evolving GCN weight matrix per relation and
    merges the per-timestep relation outputs via sum, mean, or attention."""

    def __init__(self, args, num_relations, aggregation, relation_att):
        super().__init__()
        self.args = args
        self.num_relations = num_relations
        self.aggregation = aggregation
        # attention logits over relations, owned and shared by the parent model
        self.relation_att = relation_att

        cell_args = u.Namespace({})
        cell_args.rows = args.in_feats
        cell_args.cols = args.out_feats

        # one learned initial GCN weight matrix per relation
        self.GCN_init_weights_r = nn.ParameterList([
            Parameter(torch.Tensor(args.in_feats, args.out_feats))
            for _ in range(self.num_relations)
        ])
        for weight in self.GCN_init_weights_r:
            self.reset_param(weight)

        # a single matrix GRU advances every relation's weights
        self.evolve_weights = mat_GRU_cell(cell_args)
        self.activation = self.args.activation

    def reset_param(self, t):
        # uniform init scaled by the number of columns
        bound = 1. / math.sqrt(t.size(1))
        t.data.uniform_(-bound, bound)

    def forward(self, A_lists, node_embs_list):
        """A_lists: per-relation lists of adjacency matrices over time.
        node_embs_list: node embeddings per time step (shared by relations).
        Returns one merged embedding tensor per time step."""
        # every forward pass restarts from the learned initial weights
        weights = list(self.GCN_init_weights_r)

        per_relation = []
        for r, A_seq in enumerate(A_lists):
            seq_out = []
            for step, Ahat in enumerate(A_seq):
                # evolve this relation's weights one GRU step, then apply the GCN
                weights[r] = self.evolve_weights(weights[r])
                seq_out.append(self.activation(
                    Ahat.matmul(node_embs_list[step].matmul(weights[r]))))
            per_relation.append(seq_out)

        if self.aggregation == 'attention':
            # softmax over relations, reshaped to broadcast over (nodes, feats)
            att = torch.softmax(self.relation_att, dim=0).view(-1, 1, 1)

        # NOTE(review): assumes every relation covers the same number of steps
        merged_seq = []
        for step in range(len(per_relation[0])):
            # (num_relations, num_nodes, hidden) stack for this time step
            stacked = torch.stack([seq[step] for seq in per_relation])
            if self.aggregation == 'sum':
                merged = stacked.sum(dim=0)
            elif self.aggregation == 'mean':
                merged = stacked.mean(dim=0)
            elif self.aggregation == 'attention':
                merged = (stacked * att).sum(dim=0)
            else:
                # fall back to the first relation's output
                merged = stacked[0]
            merged_seq.append(merged)
        return merged_seq


class EGCN(torch.nn.Module):
    """EvolveGCN-O: a stack of GRCU layers applied to a sequence of graph
    snapshots; returns the embeddings of the final snapshot."""

    def __init__(self, args, activation, device='cpu', skipfeats=False):
        super().__init__()
        feats = [args.feats_per_node,
                 args.layer_1_feats,
                 args.layer_2_feats]
        self.device = device
        self.skipfeats = skipfeats
        self.GRCU_layers = []
        # BUG FIX: this list was stored as self._parameters, which clobbers
        # nn.Module's internal parameter OrderedDict and breaks methods such
        # as named_parameters()/state_dict(); keep it under a private name.
        self.grcu_param_list = nn.ParameterList()
        for i in range(1, len(feats)):
            GRCU_args = u.Namespace({'in_feats': feats[i - 1],
                                     'out_feats': feats[i],
                                     'activation': activation})
            grcu_i = GRCU(GRCU_args)
            self.GRCU_layers.append(grcu_i.to(self.device))
            self.grcu_param_list.extend(list(self.GRCU_layers[-1].parameters()))

    def parameters(self):
        # override: expose only the GRCU weights to the optimizer
        return self.grcu_param_list

    def forward(self, A_list, Nodes_list, nodes_mask_list):
        """A_list: adjacency matrices over time; Nodes_list: matching node
        features; nodes_mask_list is unused (kept for interface compatibility).
        Returns the last snapshot's embeddings, optionally with skip features."""
        node_feats = Nodes_list[-1]

        for unit in self.GRCU_layers:
            Nodes_list = unit(A_list, Nodes_list)

        out = Nodes_list[-1]
        if self.skipfeats:
            out = torch.cat((out, node_feats), dim=1)   # use node_feats.to_dense() if 2hot encoded input
        return out


class GRCU(torch.nn.Module):
    """A single EvolveGCN-O layer: a GCN whose weight matrix is advanced by a
    matrix GRU at every graph snapshot."""

    def __init__(self, args):
        super().__init__()
        self.args = args
        cell_args = u.Namespace({})
        cell_args.rows = args.in_feats
        cell_args.cols = args.out_feats

        # evolves the GCN weights by one GRU step per snapshot
        self.evolve_weights = mat_GRU_cell(cell_args)
        self.activation = self.args.activation
        self.GCN_init_weights = Parameter(torch.Tensor(self.args.in_feats, self.args.out_feats))
        self.reset_param(self.GCN_init_weights)

    def reset_param(self, t):
        # uniform init scaled by the number of columns
        bound = 1. / math.sqrt(t.size(1))
        t.data.uniform_(-bound, bound)

    def forward(self, A_list, node_embs_list):
        """A_list: adjacency matrices over time; node_embs_list: matching node
        embeddings. Returns the per-snapshot output embeddings."""
        weights = self.GCN_init_weights
        outputs = []
        for step, Ahat in enumerate(A_list):
            # evolve first, then apply the GCN with the freshly evolved weights
            weights = self.evolve_weights(weights)
            embs = node_embs_list[step]
            outputs.append(self.activation(Ahat.matmul(embs.matmul(weights))))
        return outputs

class mat_GRU_cell(torch.nn.Module):
    """GRU cell whose hidden state is an entire weight matrix (EvolveGCN)."""

    def __init__(self, args):
        super().__init__()
        self.args = args
        # update and reset gates squash with sigmoid, the candidate with tanh
        self.update = mat_GRU_gate(args.rows, args.cols, torch.nn.Sigmoid())
        self.reset = mat_GRU_gate(args.rows, args.cols, torch.nn.Sigmoid())
        self.htilda = mat_GRU_gate(args.rows, args.cols, torch.nn.Tanh())

        # kept for the EvolveGCN-H variant; unused by the current forward
        self.choose_topk = TopK(feats=args.rows, k=args.cols)

    def forward(self, prev_Q):
        """Advance the weight matrix prev_Q by one GRU step and return it."""
        # EvolveGCN-O: the previous weights themselves drive the gates
        gate_input = prev_Q

        z = self.update(gate_input, prev_Q)  # update gate
        r = self.reset(gate_input, prev_Q)   # reset gate

        # candidate state from the reset-scaled previous weights
        candidate = self.htilda(gate_input, r * prev_Q)

        # standard GRU blend of previous state and candidate
        new_Q = (1 - z) * prev_Q + z * candidate
        return new_Q

        

class mat_GRU_gate(torch.nn.Module):
    """One gate of the matrix GRU: activation(W @ x + U @ hidden + bias)."""

    def __init__(self, rows, cols, activation):
        super().__init__()
        self.activation = activation
        # the gates act on the row (in_feats) dimension, hence (rows, rows)
        self.W = Parameter(torch.Tensor(rows, rows))
        self.U = Parameter(torch.Tensor(rows, rows))
        self.bias = Parameter(torch.zeros(rows, cols))
        for weight in (self.W, self.U):
            self.reset_param(weight)

    def reset_param(self, t):
        # uniform init scaled by the number of columns
        bound = 1. / math.sqrt(t.size(1))
        t.data.uniform_(-bound, bound)

    def forward(self, x, hidden):
        """Apply the gate to input x and state hidden (both rows x cols)."""
        pre_activation = self.W.matmul(x) + self.U.matmul(hidden) + self.bias
        return self.activation(pre_activation)

class TopK(torch.nn.Module):
    """Select the k highest-scoring node embeddings (by a learned projection),
    scale them by tanh of their scores, and return them transposed (feats, k)."""

    def __init__(self, feats, k):
        super().__init__()
        self.scorer = Parameter(torch.Tensor(feats, 1))
        self.reset_param(self.scorer)
        self.k = k

    def reset_param(self, t):
        # Initialize based on the number of rows
        stdv = 1. / math.sqrt(t.size(0))
        t.data.uniform_(-stdv, stdv)

    def forward(self, node_embs, mask):
        """node_embs: (num_nodes, feats); mask: additive score mask where
        excluded nodes carry -inf. Returns a (feats, k) tensor."""
        # normalized projection score per node
        scores = node_embs.matmul(self.scorer) / self.scorer.norm()
        scores = scores + mask

        vals, topk_indices = scores.view(-1).topk(self.k)
        # drop entries that were masked out with -inf
        topk_indices = topk_indices[vals > -float("Inf")]

        if topk_indices.size(0) < self.k:
            # fewer than k valid nodes: pad by repeating the last valid index
            topk_indices = u.pad_with_last_val(topk_indices, self.k)

        tanh = torch.nn.Tanh()

        # FIX: isinstance checks against torch.sparse.FloatTensor /
        # torch.cuda.sparse.FloatTensor are deprecated (and removed in recent
        # PyTorch; the cuda variant can fail on CPU-only builds). Use the
        # is_sparse flag, which covers both CPU and CUDA sparse COO tensors.
        if node_embs.is_sparse:
            node_embs = node_embs.to_dense()

        out = node_embs[topk_indices] * tanh(scores[topk_indices].view(-1, 1))

        # we need to transpose the output
        return out.t()
