import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv
from torch.nn import Parameter
import os
import torch.nn as nn
import numpy as np
import random
from torch_geometric.nn import LayerNorm


def create_activate(name):
    """Factory returning a fresh activation module for `name`.

    Args:
        name: one of "relu", "gelu", "prelu", "elu", or None (identity).

    Returns:
        A newly constructed ``nn.Module`` activation.

    Raises:
        NotImplementedError: if `name` is not a supported activation.
    """
    # Map names to constructors so every call builds a *new* module —
    # important for PReLU, which carries a learnable parameter that must
    # not be shared between otherwise-independent layers.
    factories = {
        "relu": nn.ReLU,
        "gelu": nn.GELU,
        "prelu": nn.PReLU,
        "elu": nn.ELU,
        None: nn.Identity,
    }
    try:
        return factories[name]()
    except KeyError:
        raise NotImplementedError(f"{name} is not implemented.") from None


def seed_torch(seed=0):
    """Seed every relevant RNG for reproducible runs.

    Seeds Python's `random`, `PYTHONHASHSEED`, NumPy, and PyTorch (CPU and
    all CUDA devices), and forces cuDNN into deterministic mode.

    Args:
        seed: integer seed applied to all generators (default 0).
    """
    # NOTE: the original redundantly re-imported torch here; the module-level
    # import already provides it.
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # The CUDA seeding calls are safe no-ops on CPU-only machines.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade autotuner speed for bitwise-reproducible convolutions.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
        
def get_triangle(g_):
    """Mask the adjacency `g_` by its diagonal-free two-hop counts.

    Entry (i, j) of the result is g_[i, j] times the number of length-2
    paths i -> k -> j that avoid self-loops, i.e. edges that close at
    least one triangle keep a positive weight.
    """
    hollow = np.copy(g_)
    np.fill_diagonal(hollow, 0)          # drop self-loops before squaring
    two_hop = hollow @ hollow            # (i, j) -> # of 2-step paths
    return g_ * two_hop

def compute_q(z, centers=None, alpha=1):
    """Soft cluster assignments via a Student's t-distribution kernel.

    Args:
        z: (n, d) embeddings.
        centers: (k, d) cluster centers.
        alpha: degrees of freedom of the t-kernel (default 1).

    Returns:
        (n, k) row-stochastic matrix of assignment probabilities.
    """
    diff = z.unsqueeze(1) - centers                      # (n, k, d)
    sq_dist = (diff ** 2).sum(dim=2)                     # (n, k)
    kernel = (1.0 + sq_dist / alpha) ** (-(alpha + 1.0) / 2.0)
    return kernel / kernel.sum(dim=1, keepdim=True)

def compute_p(q):
    """Sharpened target distribution from soft assignments `q` (DEC-style).

    Squares each assignment and normalizes by per-cluster frequency, then
    renormalizes rows, pushing each row toward its dominant cluster.

    Args:
        q: (n, k) row-stochastic soft assignments.

    Returns:
        (n, k) row-stochastic target distribution.
    """
    weight = q.pow(2) / q.sum(dim=0, keepdim=True)
    return weight / weight.sum(dim=1, keepdim=True)

class Views:
    """Precomputes per-view and consensus graph structures for training.

    For each input adjacency matrix it stores edge indices of the raw graph,
    of its triangle-closing edges, and of edges supported by the consensus
    two-hop graph, plus softmax-normalized dense weight matrices.

    NOTE(review): every `*args` entry is assumed to be a square 2-D numpy
    adjacency matrix, all of the same shape — confirm at call sites.
    NOTE(review): several tensors are moved to CUDA unconditionally, so this
    class requires a GPU; `self.T` / `self.T_2` are left on CPU while the
    per-view edge indices are not — verify that asymmetry is intentional.
    """
    def __init__(self, thres, *args):
        # Number of views.
        self.v = len(args)
        # T accumulates the element-wise sum of all views, then is binarized:
        # an edge survives only if at least `thres` total weight supports it.
        T = np.copy(args[0])
        for i in range(1, self.v):
            T += args[i]
        T = np.where(T<thres, 0.0, 1.0)
        T_ = np.copy(T)

        # Zero the diagonal before squaring so T_2 counts length-2 paths
        # without self-loops (same trick as get_triangle).
        np.fill_diagonal(T_, 0)
        T_2 = T_.dot(T_)

        # Per-view containers:
        #   A          - edge index of the raw view (2 x E, CUDA)
        #   A_2        - edge index of triangle-supported edges
        #   A_T        - edge index of edges backed by consensus 2-hop paths
        #   A_matrix   - dense float adjacency (CPU)
        #   A_sum_matrix - row-softmax of (view + triangle + consensus) weights
        self.A, self.A_2, self.A_T, self.A_matrix, self.A_sum_matrix = [], [], [], [], []

        for i in range(self.v):

            self.A.append(torch.from_numpy(np.array(np.nonzero(args[i]))).long().cuda())
            self.A_matrix.append(torch.from_numpy(args[i]).float())  

            # Edges of view i that close at least one triangle within view i.
            A_2 = get_triangle(args[i])
            self.A_2.append(torch.from_numpy(np.array(np.nonzero(A_2))).long().cuda())

            # Edges of view i supported by a 2-hop path in the consensus graph.
            A_T = T_2 * args[i]
            self.A_T.append(torch.from_numpy(np.array(np.nonzero(A_T))).long().cuda())

            # Combined evidence, row-normalized with softmax for aggregation.
            A_sum = F.softmax(torch.from_numpy(args[i] + A_2 + A_T).float(), dim=-1).cuda()
            self.A_sum_matrix.append(A_sum)
            
        # Consensus graph (dense, CPU) and its softmax-weighted combination.
        self.T_matrix = torch.from_numpy(T).float() 
        self.T_sum_matrix = F.softmax(torch.from_numpy(T * T_2 + T).float(), dim=-1).cuda()
        
        # Edge indices of the consensus graph and its 2-hop-supported edges.
        self.T = torch.from_numpy(np.array(np.nonzero(T))).long()
        self.T_2 = torch.from_numpy(np.array(np.nonzero(T * T_2))).long()

class GATEncoder(nn.Module):
    """Three-branch GAT encoder with attention-based view fusion.

    Each of the three edge-index inputs drives its own two-layer GATConv
    branch; a shared attention vector scores the three branch embeddings
    and a softmax over the scores fuses them into one representation,
    which also feeds a two-layer reconstruction decoder.

    Args:
        channels: sequence of four sizes [in_dim, hidden, out, decoder].
        heads: attention heads per GATConv (outputs are concatenated).
        drop: dropout probability applied before/after each conv.
        activate: activation name understood by ``create_activate``.
    """

    def __init__(self, channels, heads=1, drop=0.0, activate='elu'):
        super().__init__()

        self.convs1 = nn.ModuleList([
            GATConv(channels[0], channels[1], heads=heads) for _ in range(3)
        ])
        self.convs2 = nn.ModuleList([
            GATConv(channels[1] * heads, channels[2], heads=heads) for _ in range(3)
        ])
        self.norms = nn.ModuleList([
            LayerNorm(channels[2] * heads) for _ in range(3)
        ])
        # View-level attention: score_v = mean over nodes of tanh(W h) . a
        self.fc_att = nn.Linear(channels[2] * heads, channels[2] * heads)
        self.att = Parameter(torch.rand(1, channels[2] * heads))
        nn.init.xavier_uniform_(self.att)

        self.fc = nn.Linear(channels[2] * heads, channels[2] * heads)

        # Two-layer decoder used for the reconstruction output.
        self.decoder1 = nn.Linear(channels[2] * heads, channels[3])
        self.decoder2 = nn.Linear(channels[3], channels[3])

        self.norm = LayerNorm(channels[2] * heads)

        self.drop = drop
        self.activate = create_activate(activate)

        self.apply(self.init_linear)

    def init_linear(self, m):
        # Xavier init for every Linear; GATConv layers keep their own init.
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, x, v1, v2, v3):
        """Encode `x` under three edge sets and fuse the branch outputs.

        Args:
            x: (num_nodes, channels[0]) node features.
            v1, v2, v3: edge indices for the three views.

        Returns:
            Tuple of (normalized fused embedding, reconstruction,
            list of per-view embeddings, softmax view weights alpha).
        """
        x = F.dropout(x, p=self.drop, training=self.training)
        # Hoisted: the original rebuilt the [v1, v2, v3] list twice per
        # loop iteration.
        edge_views = (v1, v2, v3)
        x_list = []
        scores = []
        for conv1, conv2, norm, edges in zip(self.convs1, self.convs2,
                                             self.norms, edge_views):
            h = F.dropout(self.activate(conv1(x, edges)),
                          p=self.drop, training=self.training)
            h = F.dropout(self.activate(conv2(h, edges)),
                          p=self.drop, training=self.training)
            h = norm(h)
            x_list.append(h)
            # Scalar attention score for this view, averaged over nodes.
            proj = torch.tanh(self.fc_att(h))
            scores.append((proj * self.att).sum() / proj.shape[0])

        # torch.stack inherits the scores' device, replacing the original
        # hard-coded .cuda() allocation — the module now also runs on CPU
        # and is unchanged on GPU inputs.
        alpha = F.softmax(torch.stack(scores), dim=0)

        x = alpha[0] * x_list[0] + alpha[1] * x_list[1] + alpha[2] * x_list[2]

        # Residual fusion head, then decode for reconstruction.
        x = self.activate(self.fc(x)) + x
        x_r = self.activate(self.decoder1(x))
        return self.norm(x), self.activate(self.decoder2(x_r)) + x_r, x_list, alpha

class AttForEmb(nn.Module):
    """Attention-weighted fusion of `v` view embeddings with a decoder head.

    A shared attention vector scores each view embedding; the softmax of
    the scores weights the views into a fused embedding Z, which is passed
    through a residual projection, a norm, and a two-layer decoder.

    Args:
        chan: sequence [embed_dim, decoder_dim].
        activate: activation name understood by ``create_activate``.
        v: number of views to fuse.
    """

    def __init__(self, chan, activate, v):
        super().__init__()

        self.fc_att = nn.Linear(chan[0], chan[0])
        self.att = Parameter(torch.rand(1, chan[0]))
        nn.init.xavier_uniform_(self.att)

        self.norm = LayerNorm(chan[0])
        self.fc = nn.Linear(chan[0], chan[0])

        self.decoder1 = nn.Linear(chan[0], chan[1])
        self.decoder2 = nn.Linear(chan[1], chan[1])

        self.activate = create_activate(activate)

        self.apply(self.init_linear)

        self.v = v

    def init_linear(self, m):
        # Xavier init for every Linear submodule.
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, args):
        """Fuse the first ``self.v`` view embeddings in `args`.

        Args:
            args: sequence of (num_nodes, chan[0]) tensors, one per view.

        Returns:
            Tuple of (fused embedding Z, softmax view weights alpha,
            decoder reconstruction).
        """
        # Score each view with the shared attention vector. Stacking the
        # scalar scores inherits their device, replacing the original
        # hard-coded .cuda() so the module also runs on CPU (unchanged
        # behavior when everything lives on GPU).
        scores = []
        for i in range(self.v):
            proj = torch.tanh(self.fc_att(args[i]))
            scores.append((proj * self.att).sum() / proj.shape[0])
        alpha = F.softmax(torch.stack(scores), dim=0)

        # Weighted sum of views; zeros_like keeps args[0]'s device/dtype
        # instead of forcing a CUDA allocation.
        Z = torch.zeros_like(args[0])
        for i in range(self.v):
            Z = Z + alpha[i] * args[i]
        Z = self.norm(self.activate(self.fc(Z) + Z))
        Z_r = self.activate(self.decoder1(Z))
        return Z, alpha, self.activate(self.decoder2(Z_r)) + Z_r

    def Init_center(self, centers):
        """Register cluster centers as a learnable parameter.

        Args:
            centers: (k, chan[0]) numpy array of initial cluster centers;
                placed on the same device as this module's parameters
                (device-agnostic generalization of the original .cuda()).
        """
        self.centers = Parameter(torch.from_numpy(centers).to(self.att.device))
