import torch
import torch_geometric
from torch.nn import Sequential, Linear, ReLU, BatchNorm1d, Dropout
import torch.nn.functional as F
from torch_geometric.nn import  GINConv, global_add_pool,GATConv
from torch.nn import Sequential, Linear, ReLU
	
class GAT_GIN(torch.nn.Module):
	"""Two-branch graph regressor combining GAT and GIN encoders.

	A graph is encoded independently by a 3-layer GAT stack (3 attention
	heads per layer, head outputs concatenated) and a 3-layer GIN stack
	(each conv wrapping a 2-layer MLP).  Both branches are sum-pooled to
	graph level, concatenated (384 + 64 = 448 dims), and passed through a
	3-layer fully connected head that emits one scalar per graph.

	NOTE(review): the head ends with a ReLU, so predictions are clamped to
	be non-negative — presumably the regression target is non-negative;
	confirm against the training data.
	"""

	def __init__(self):
		super().__init__()
		# --- GAT branch (node features: 373-dim input) ---
		self.gat1 = GATConv(373, 256, heads=3)
		self.bn11 = BatchNorm1d(256 * 3)
		self.gat2 = GATConv(256 * 3, 128, heads=3)
		self.bn12 = BatchNorm1d(128 * 3)
		self.gat3 = GATConv(128 * 3, 128, heads=3)
		self.bn13 = BatchNorm1d(128 * 3)
		# --- GIN branch (each conv wraps a small MLP) ---
		self.gin1 = GINConv(Sequential(Linear(373, 256), ReLU(), Linear(256, 256)))
		self.bn21 = BatchNorm1d(256)
		self.gin2 = GINConv(Sequential(Linear(256, 128), ReLU(), Linear(128, 128)))
		self.bn22 = BatchNorm1d(128)
		self.gin3 = GINConv(Sequential(Linear(128, 64), ReLU(), Linear(64, 64)))
		self.bn23 = BatchNorm1d(64)
		# --- Prediction head on concatenated graph embeddings ---
		self.fc1 = Linear(128 * 3 + 64, 256)
		self.fc2 = Linear(256, 64)
		self.fc3 = Linear(64, 1)

	def forward(self, data):
		"""Return a 1-D tensor with one non-negative prediction per graph.

		Expects a torch_geometric batch exposing `x`, `edge_index`, and
		`batch`.
		"""
		x, edge_index = data.x, data.edge_index

		# GAT branch: conv -> ReLU -> batch-norm, three times, then pool.
		g = x
		for conv, bn in ((self.gat1, self.bn11),
						 (self.gat2, self.bn12),
						 (self.gat3, self.bn13)):
			g = bn(F.relu(conv(g, edge_index)))
		g = global_add_pool(g, data.batch)

		# GIN branch: identical conv/ReLU/BN pattern, then pool.
		h = x
		for conv, bn in ((self.gin1, self.bn21),
						 (self.gin2, self.bn22),
						 (self.gin3, self.bn23)):
			h = bn(F.relu(conv(h, edge_index)))
		h = global_add_pool(h, data.batch)

		# Concatenate the two graph representations and regress.
		out = torch.cat((g, h), 1)
		out = F.dropout(F.relu(self.fc1(out)), p=0.2, training=self.training)
		out = F.dropout(F.relu(self.fc2(out)), p=0.2, training=self.training)
		return F.relu(self.fc3(out)).view(-1)


