import torch
import torch.nn as nn
from torch_geometric.nn import GCNConv, SAGEConv, GATConv
from GAT1 import GAT
from RNN1 import RNN
import torch.nn.functional as F
# Run on the first GPU when one is present; otherwise fall back to the CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
# LSTM configuration shared by Model: number of stacked layers and hidden size.
RNN_LAYER = 1
RNN_OUT = 1
class Model(nn.Module):
    """GAT / LSTM / MLP model.

    NOTE(review): the active ``forward`` path only runs the fully-connected
    head (fc1 -> fc2 -> fc3). The GAT layers, the LSTM and ``fc4`` are still
    constructed so existing checkpoints and the ``init_*`` helpers keep
    working, but they are bypassed at inference time.

    Args:
        node_size: number of graph nodes; only consumed by the (unused) fc4.
        features:  per-node input feature dimension fed to gat1.
        hidden:    hidden size per attention head of gat1.
        heads:     number of attention heads of gat1.
    """

    def __init__(self, node_size, features, hidden, heads):
        super(Model, self).__init__()
        # Two-layer graph attention stack (currently bypassed in forward).
        # gat1 emits hidden*heads features (heads are concatenated by default).
        self.gat1 = GATConv(features, hidden, heads=heads)
        self.gat2 = GATConv(hidden * heads, 32)
        # NOTE(review): LSTM input width 44 is hard-coded — presumably the
        # feature width produced upstream; confirm against the caller.
        self.rnn = nn.LSTM(44, RNN_OUT, RNN_LAYER)
        # Fully-connected head; 64*21 is the expected flattened input width.
        self.fc1 = nn.Linear(64 * 21, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 1)
        self.fc4 = nn.Linear(node_size, 1)  # unused in the active path
        self.init_linear()
        # LSTM initial state; shape (num_layers, hidden_size) matches an
        # unbatched LSTM input — TODO confirm the caller feeds unbatched data.
        self.h0 = torch.randn(size=(RNN_LAYER, RNN_OUT)).to(device)
        self.c0 = torch.randn(size=(RNN_LAYER, RNN_OUT)).to(device)
        # Fix: do not assign self.training here — nn.Module.__init__ already
        # sets it to True, and train()/eval() manage it afterwards.
        self.hn = None  # last LSTM hidden state (currently never written)
        self.cn = None  # last LSTM cell state (currently never written)

    def init_hidden(self):
        """Re-sample the LSTM initial hidden/cell state on the module device."""
        self.h0 = torch.randn(size=(RNN_LAYER, RNN_OUT)).to(device)
        self.c0 = torch.randn(size=(RNN_LAYER, RNN_OUT)).to(device)

    def init_GAT(self):
        """Xavier-init GAT weights; constant biases (0.5 for gat1, 0 for gat2)."""
        for name, param in self.gat1.named_parameters():
            if 'weight' in name:
                torch.nn.init.xavier_normal_(param.data)
            elif 'bias' in name:
                torch.nn.init.constant_(param.data, 0.5)
        for name, param in self.gat2.named_parameters():
            if 'weight' in name:
                torch.nn.init.xavier_normal_(param.data)
            elif 'bias' in name:
                torch.nn.init.constant_(param.data, 0)

    def init_linear(self):
        """Xavier-init every Linear weight (biases keep PyTorch's default)."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                torch.nn.init.xavier_normal_(m.weight.data, gain=1.0)

    def init_weight(self):
        """Re-init Linear weights (normal), gat1 params, and the LSTM state."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data)
        for name, param in self.gat1.named_parameters():
            if 'weight' in name:
                torch.nn.init.xavier_normal_(param.data)
            elif 'bias' in name:
                torch.nn.init.constant_(param.data, 0.5)
        self.init_hidden()

    def forward(self, x, edge_index):
        """MLP head: (normalize -> linear -> relu) x2, normalize -> fc3.

        Args:
            x: input features with last dimension 64*21 (fc1's input width).
            edge_index: graph connectivity; accepted for interface
                compatibility with the GAT path but unused here.

        Returns:
            Tensor with last dimension 1 (scalar prediction per row).
        """
        # L2-normalize along dim 0 (the batch/node axis) before each layer.
        x = F.normalize(x, dim=0)
        x = self.fc1(x)
        x = F.relu(x)
        x = F.normalize(x, dim=0)
        x = self.fc2(x)
        x = F.relu(x)
        x = F.normalize(x, dim=0)
        x = self.fc3(x)
        return x
