import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_scatter
from models import DGI, GraphCL, Lp,GcnLayers
from layers import GCN, AvgReadout 
import tqdm
import numpy as np
from sklearn.decomposition import PCA

########################################################################################
#downprompt
def spectral_regularization_smooth(x, x0, eivec, eival, thres):
    """Spectral smoothness penalty on prompted features.

    Projects both the prompted features `x` and the originals `x0` onto the
    eigenbasis `eivec`, measures the eigenvalue-weighted gap between
    consecutive spectral components, and penalizes gaps that exceed
    `thres` times the original gap. Only component pairs whose eigenvalues
    are clearly separated (> 1e-2) contribute.
    """
    # einsum('nm,md->nd', eivec, x) is just a matrix product.
    spec = eivec @ x
    spec0 = eivec @ x0

    def _weighted_gap(s):
        # |s[i]*lam[i] - s[i+1]*lam[i+1]| for consecutive spectral rows.
        return (s[:-1] * eival[:-1].unsqueeze(1) - s[1:] * eival[1:].unsqueeze(1)).abs()

    gap = _weighted_gap(spec)
    gap0 = _weighted_gap(spec0)

    # Keep only pairs with a meaningful eigenvalue separation.
    separated = (eival[:-1] - eival[1:]) > 1e-2
    return F.relu(gap - thres * gap0)[separated].mean()
    
# def spectral_regularization_lowpass(x, x0, eivec, eival, thres):
#     relu = torch.nn.ReLU()
#     x_out = torch.einsum('nm,md->nd', eivec, x)
#     x0_out = torch.einsum('nm,md->nd', eivec, x0)

#     loss = relu(x_out.abs() - thres * x0_out.abs())[x0_out.abs()>1e-2].mean()
#     return loss

class RoutingNetwork(nn.Module):
    """Small MLP that scores each prompt token and softmax-normalizes the
    scores across tokens, producing mixing weights that sum to one.
    """

    def __init__(self, input_dim=50, dropout_rate=0.1):
        super(RoutingNetwork, self).__init__()
        # Attribute names kept stable so existing checkpoints still load.
        self.layer1 = nn.Linear(input_dim, 64)
        self.dropout = nn.Dropout(dropout_rate)
        self.layer2 = nn.Linear(64, 1)

    def forward(self, x):
        # (num_tokens, input_dim) -> (num_tokens, 64) -> (num_tokens, 1)
        hidden = self.dropout(F.relu(self.layer1(x)))
        scores = self.layer2(hidden)
        # Normalize over the token dimension (dim 0), not per-row.
        return F.softmax(scores, dim=0)

class downprompt(nn.Module):
    """Downstream node classifier built on feature prompting.

    Forward pass: prompt the raw features, re-embed them with the given
    (frozen) GCN, then classify each node by cosine similarity to
    per-class prototype embeddings (recomputed when train == 1).
    """
    def __init__(self, weights_list, ft_in, nb_classes, type, feature_dim, num_tokens=4, dropout_rate=0.1):
        super(downprompt, self).__init__()
        # Feature-level prompt: mix of pretrained tokens plus an open prompt.
        self.prefeature = prefeatureprompt(weights_list,dim=feature_dim,type=type, num_tokens=num_tokens, dropout_rate=dropout_rate)
        self.nb_classes = nb_classes
        self.leakyrelu = nn.ELU()  # NOTE(review): named "leakyrelu" but is ELU; not used in forward()
        self.one = torch.ones(1,ft_in).cuda()  # NOTE(review): never read in this class
        # Per-class prototypes; holds uninitialized memory until the first
        # forward pass with train=1 overwrites it via averageemb().
        # NOTE(review): allocated as (nb_classes, ft_in) but later replaced by
        # averages of GCN embeddings — confirm ft_in matches the GCN hidden size.
        self.ave = torch.FloatTensor(nb_classes,ft_in).cuda()

    def forward(self, eivec, eival, thres, features,adj,sparse,gcn,idx,seq,labels=None,train=0): # caller shapes e.g.: eivec [7650,50], features [7650,50], idx [1650], seq [1650,256]
        """Classify the nodes selected by `idx`.

        Returns:
            ret: (len(idx), nb_classes) softmax class scores.
            en_result: per-node entropy of `ret` (prediction uncertainty).
            reg_loss: spectral smoothness regularizer on the prompted features.
        """
        features1 = self.prefeature(features)  # prompted features, same shape as `features`
        reg_loss = spectral_regularization_smooth(features1, features, eivec, eival, thres)
        embeds1 = gcn(features1,adj, sparse, None).squeeze()  # node embeddings, e.g. [7650, 256]
        pretrain_embs1 = embeds1[idx]  # embeddings of the selected nodes
        rawret = pretrain_embs1
        rawret = rawret.cuda()
        if train == 1:
            # Recompute the per-class prototype embeddings from labeled nodes.
            self.ave = averageemb(labels=labels, rawret=rawret,nb_class=self.nb_classes)
        ret = torch.FloatTensor(seq.shape[0],self.nb_classes).cuda()  # NOTE(review): dead allocation, overwritten below
        rawret = torch.cat((rawret,self.ave),dim=0)  # append class prototypes after node embeddings
        # Full pairwise cosine-similarity matrix over nodes + prototypes.
        rawret = torch.cosine_similarity(rawret.unsqueeze(1), rawret.unsqueeze(0), dim=-1)
        # Keep only node-vs-prototype similarities: first N rows, columns after N.
        ret = rawret[:seq.shape[0],seq.shape[0]:]
        ret = F.softmax(ret, dim=1)
        en_result = self.calculate_uncertainty(ret)
        ret = ret.float().cuda()
        en_result = en_result.float().cuda()
        return ret, en_result, reg_loss

    def weights_init(self, m):
        # Xavier init for Linear layers; intended for use with nn.Module.apply().
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)
    
    def calculate_uncertainty(self, softmax_output):
        """Row-wise entropy of a softmax output (higher = more uncertain)."""
        # epsilon avoids log(0).
        epsilon = 1e-8
        entropy = -torch.sum(softmax_output * torch.log(softmax_output + epsilon), dim=1)
        return entropy

class downstreamprompt(nn.Module):
    """Open (task-specific) prompt: a learnable per-dimension scaling vector
    applied elementwise to every row of the input embedding.
    """

    def __init__(self, hid_units):
        super(downstreamprompt, self).__init__()
        # One scaling coefficient per hidden dimension, broadcast over rows.
        self.weight = nn.Parameter(torch.FloatTensor(1, hid_units), requires_grad=True)
        self.act = nn.ELU()  # kept for interface parity; not applied in forward

        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.xavier_uniform_(self.weight)

    def forward(self, graph_embedding):
        # (1, hid) broadcasts across all rows of the embedding matrix.
        scaled = self.weight * graph_embedding
        return scaled
    
class composedtoken(nn.Module):
    """Mix-prompt: collapses the pretrained prompt tokens into one mixed
    token and applies it to the input features additively or multiplicatively.
    """

    def __init__(self, weights_list, type:str, num_tokens=4, dropout_rate=0.1):
        super(composedtoken, self).__init__()
        self.texttoken = weights_list  # stacked pretrained tokens, (num_tokens, dim)
        self.prompt = weighted_prompt(num_tokens, dropout_rate).cuda()  # learns mixing coefficients
        self.type = type  # combine mode: 'add' or 'mul'

    def forward(self,seq):
        # Mix the token stack into a single (1, dim) token.
        texttoken = self.prompt(self.texttoken)
        if self.type == 'add':
            texttoken = texttoken.repeat(seq.shape[0],1)
            rets = texttoken + seq
        elif self.type == 'mul':
            # (1, dim) broadcasts over the rows of seq.
            rets = texttoken * seq
        else:
            # BUGFIX: an unknown type previously fell through both branches and
            # raised an opaque UnboundLocalError on `rets`; fail fast instead.
            raise ValueError(f"composedtoken: unknown combine type {self.type!r}; expected 'add' or 'mul'")
        return rets

class prefeatureprompt(nn.Module):
    """Full feature prompt: blends the mix-prompt (pretrained tokens) with
    the unify/open prompt via learned combination weights.
    """

    def __init__(self, weights_list, dim,type:str,num_tokens=4, dropout_rate=0.1):
        super(prefeatureprompt, self).__init__()
        self.precomposedfeature = composedtoken(weights_list, type, num_tokens, dropout_rate=dropout_rate)  # mix-prompt
        self.preopenfeature = downstreamprompt(dim)  # unify-prompt
        self.combineprompt = combineprompt()

    def forward(self,seq):
        # Prompt the same features two independent ways, then blend them
        # with the learned pair of combination coefficients.
        mixed = self.precomposedfeature(seq)
        opened = self.preopenfeature(seq)
        return self.combineprompt(mixed, opened)

class combineprompt(nn.Module):
    """Learned two-way blend of embeddings: ELU(w0 * a + w1 * b)."""

    def __init__(self):
        super(combineprompt, self).__init__()
        # Two scalar blending coefficients, Xavier-initialized.
        self.weight = nn.Parameter(torch.FloatTensor(1, 2), requires_grad=True)
        self.act = nn.ELU()
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.xavier_uniform_(self.weight)

    def forward(self, graph_embedding1, graph_embedding2):
        w0 = self.weight[0][0]
        w1 = self.weight[0][1]
        blended = w0 * graph_embedding1 + w1 * graph_embedding2
        return self.act(blended)
    
class weighted_prompt(nn.Module):
    """Scales each prompt token by a learned weight, then lets a routing
    network produce softmax mixing coefficients and collapses the tokens
    into a single (1, dim) mixed token.
    """

    def __init__(self,weightednum, dropout_rate=0.1):
        super(weighted_prompt, self).__init__()
        # One learnable scale per token.
        self.weight = nn.Parameter(torch.FloatTensor(1,weightednum), requires_grad=True)
        self.act = nn.ELU()  # kept for interface parity; not applied in forward
        self.reset_parameters()
        # NOTE(review): RoutingNetwork defaults to input_dim=50 — assumes the
        # token feature dimension is 50; confirm against the token source.
        self.routingnet = RoutingNetwork(dropout_rate=dropout_rate)

    def reset_parameters(self):
        torch.nn.init.xavier_uniform_(self.weight)

    def forward(self, graph_embedding):
        # Per-token scaling: (k, 1) * (k, d) -> (k, d).
        weighted_tokens = self.weight.T * graph_embedding
        # Routing scores over tokens: (k, 1), softmaxed along dim 0.
        routing = self.routingnet(weighted_tokens)
        # Weighted sum of token rows: (1, k) @ (k, d) -> (1, d).
        mixed = torch.mm(routing.T, weighted_tokens)
        return mixed
    
def averageemb(labels,rawret,nb_class):
    """Mean embedding per class (class prototypes).

    Args:
        labels: (N,) integer class ids in [0, nb_class).
        rawret: (N, D) node embeddings.
        nb_class: number of output rows; a class with no members gets a
            zero row (matching torch_scatter's behavior for empty bins).

    Returns:
        (nb_class, D) tensor of per-class mean embeddings, on rawret's device.

    Note: previously this delegated to torch_scatter.scatter(..., reduce='mean'),
    which ignored `nb_class` and sized the output as max(labels)+1 — so any
    trailing absent class silently shrank the prototype matrix. This pure-torch
    version always returns exactly nb_class rows and drops the extra dependency.
    """
    idx = labels.long()
    # Per-class sums via index_add, then divide by per-class counts.
    sums = torch.zeros(nb_class, rawret.size(1), dtype=rawret.dtype, device=rawret.device)
    sums.index_add_(0, idx, rawret)
    counts = torch.bincount(idx, minlength=nb_class).clamp(min=1)  # clamp: empty class -> 0/1 = 0
    return sums / counts.unsqueeze(1).to(rawret.dtype)


########################################################################################
#preprompt

class PrePrompt(nn.Module):
    """Pretraining module: contrastive link-prediction loss plus a variance
    regularizer that encourages predictions invariant to the learnable
    feature masks.
    """
    def __init__(self, n_in, n_h, activation, sample, num_layers_num, p, type, variance_weight, num_tokens=4, n_samples=3):
        super(PrePrompt, self).__init__()
        self.lp = Lp(n_in, n_h)  # link-prediction head
        self.gcn = GcnLayers(n_in, n_h, num_layers_num, p)
        self.read = AvgReadout()
        self.prompttype = type
        # Precomputed (positive, negatives...) index tuples, one row per node.
        self.negative_sample = torch.tensor(sample, dtype=int).cuda()
        self.loss = nn.BCEWithLogitsLoss()  # NOTE(review): defined but not used in forward()

        # Continuous mask logits, one mask per graph/token: (num_tokens, n_in).
        self.masks_logits = nn.Parameter(torch.randn(num_tokens, n_in))  # (num_tokens, n_in)
        self.n_samples = n_samples  # number of random-noise draws for the variance loss

        # Weight of the variance regularizer in the total loss.
        self.variance_weight = variance_weight

    def forward(self, seq_list, adj_list, sparse, msk, samp_bias1, samp_bias2):
        """Return total pretraining loss = contrastive loss + weighted variance loss.

        Args:
            seq_list: per-graph node features, [num_tokens] x (N, n_in).
            adj_list: per-graph adjacency matrices, [num_tokens] entries.
            sparse: whether the adjacencies are sparse (forwarded to the GCN).
            msk, samp_bias1, samp_bias2: unused here; kept for interface parity.
        """
        # Continuous feature masks in (0, 1), one per graph/token.
        mask_prob = torch.sigmoid(self.masks_logits)  # (num_tokens, n_in)

        # Apply each graph's mask to its node features.
        masked_features = [seq * mask_prob[i].unsqueeze(0) for i, seq in enumerate(seq_list)] # [num_tokens] x (N, n_in)
        # Link-prediction logits for every masked graph.
        prelogits = [self.lp(self.gcn, preseq, adj, sparse) for preseq, adj in zip(masked_features, adj_list)] # [num_tokens] x (N, n_h)

        logits = torch.cat(prelogits, dim=0)  # (total_nodes, n_h)

        # Contrastive loss over the sampled (positive, negatives) tuples.
        lploss = compareloss(logits, self.negative_sample, temperature=1)  # scalar

        # Perturb the unmasked feature directions with random noise and measure
        # how much the loss varies: low variance means the masks isolate the
        # invariant (predictive) features.
        loss_variances = []
        for i in range(self.n_samples):
            # NOTE(review): hard-codes the feature dim to 50 — assumes n_in == 50; confirm.
            random_noise = torch.randn(1, 50, device= seq_list[0].device)
            # NOTE(review): indexes mask_prob by the sample counter i, which
            # assumes n_samples <= num_tokens — confirm this is intended.
            random_noise = random_noise * (1 - mask_prob[i].unsqueeze(0))  # [1, 50]
    
            noisy_features = [
                masked + random_noise.expand_as(masked) * (1 - mask_prob[j].unsqueeze(0))
                for j, masked in enumerate(masked_features)
            ] # [num_tokens] x (N, n_in)
            noisy_prelogits = [
                self.lp(self.gcn, noisy_seq, adj, sparse) 
                for noisy_seq, adj in zip(noisy_features, adj_list)
            ]  # [num_tokens] x (N, n_h)

            noisy_logits = torch.cat(noisy_prelogits, dim=0)  # all graphs' perturbed logits
            # noisy_logits: (total_nodes, n_h)

            noisy_loss = compareloss(noisy_logits, self.negative_sample, temperature=1)  # scalar
            loss_variances.append(noisy_loss)
        # loss_variances: [n_samples]
        loss_variances = torch.stack(loss_variances)  # to a single tensor
        variance_loss = torch.var(loss_variances)  # variance across noise draws
        # Total = contrastive loss + weighted variance regularizer.
        total_loss = lploss + self.variance_weight * variance_loss  # scalar
        print(f"lploss: {lploss.item()}, variance_loss: {variance_loss.item()}")
        return total_loss

    def embed(self, seq, adj, sparse, msk,LP): 
        """Return detached node embeddings and their averaged (read-out) summary."""
        h_1 = self.gcn(seq, adj, sparse,LP)
        c = self.read(h_1, msk)  # msk is typically None at the call sites
        return h_1.detach(), c.detach()
    
def mygather(feature, index):
    """Row-gather: equivalent to feature[index], returned with shape
    (index.size(0), -1, feature.size(1)).

    Args:
        feature: (N, D) matrix of row vectors.
        index: integer (long) index matrix; each entry selects a row of feature.
    """
    n_groups = index.size(0)
    flat = index.reshape(-1)
    # index_select on the flattened indices matches the original broadcast+gather.
    picked = feature.index_select(0, flat)
    return picked.view(n_groups, -1, feature.size(1))

def compareloss(feature,tuples,temperature):
    """InfoNCE-style contrastive loss over sampled (positive, negatives) tuples.

    Args:
        feature: (N, D) node embeddings.
        tuples: (N, 1+k) index matrix; column 0 is node i's positive sample,
            columns 1..k are negatives (see prompt_pretrain_sample).
        temperature: scaling constant. NOTE(review): applied as exp(sim)/T
            rather than the conventional exp(sim/T); the 1/T factor appears in
            both numerator and denominator below and cancels, so this parameter
            currently has no effect on the loss value.

    Returns:
        Scalar mean of -log(exp(sim_pos) / sum_j exp(sim_neg_j)).
    """
    h_tuples=mygather(feature,tuples)  # (N, 1+k, D) sampled embeddings
    # Anchor indices 0..N-1, broadcast to the tuple width.
    temp = torch.arange(0, len(tuples))
    temp = temp.reshape(-1, 1)
    temp = torch.broadcast_to(temp, (temp.size(0), tuples.size(1)))
    temp=temp.cuda()
    h_i = mygather(feature, temp)  # (N, 1+k, D): each anchor repeated 1+k times
    sim = F.cosine_similarity(h_i, h_tuples, dim=2)  # (N, 1+k) anchor-vs-sample similarity
    exp = torch.exp(sim)
    exp = exp / temperature
    exp = exp.permute(1, 0)  # (1+k, N)
    numerator = exp[0].reshape(-1, 1)  # positive-pair term per node
    denominator = exp[1:exp.size(0)]   # negative-pair terms
    denominator = denominator.permute(1, 0)
    denominator = denominator.sum(dim=1, keepdim=True)  # sum over negatives
    res = -1 * torch.log(numerator / denominator)
    return res.mean()

def pca_compression(seq,k):
    """Project `seq` onto its first k principal components.

    Prints the total explained-variance ratio of the kept components,
    then returns the (n_samples, k) reduced array.
    """
    reducer = PCA(n_components=k)
    reduced = reducer.fit_transform(seq)

    # Report how much variance the k components retain.
    print(reducer.explained_variance_ratio_.sum())
    return reduced

def prompt_pretrain_sample(adj,n):
    """Sample contrastive tuples from a scipy CSR adjacency matrix.

    For each node, picks one random neighbor as the positive sample (the node
    itself if it is isolated) and n random non-neighbors as negatives.

    Args:
        adj: scipy.sparse CSR adjacency matrix (uses .indices / .indptr).
        n: number of negative samples per node.

    Returns:
        (num_nodes, 1+n) int array: column 0 = positive, columns 1..n = negatives.
    """
    num_nodes = adj.shape[0]
    col_idx = adj.indices
    row_ptr = adj.indptr
    samples = np.zeros((num_nodes, 1 + n))
    all_nodes = np.array(range(num_nodes))
    for node in range(num_nodes):
        neighbors = col_idx[row_ptr[node]:row_ptr[node + 1]]
        non_neighbors = np.setdiff1d(all_nodes, neighbors)
        # Shuffle order preserved from the original (neighbors first, then
        # non-neighbors) so the random stream is consumed identically.
        np.random.shuffle(neighbors)
        np.random.shuffle(non_neighbors)
        # Isolated nodes fall back to themselves as the "positive" sample.
        samples[node][0] = node if np.size(neighbors) == 0 else neighbors[0]
        samples[node][1:1 + n] = non_neighbors[0:n]
    return samples.astype(int)



