import torch
from torch_geometric.nn import HypergraphConv
from torch_geometric.nn import GraphConv, TopKPooling
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
from torch_geometric.nn import SAGPooling as Pool
from torch_geometric.nn.norm import BatchNorm
import torch.nn.functional as F
import os
import re
import inspect
import os.path as osp
from uuid import uuid1
from itertools import chain
from inspect import Parameter
from typing import List, Optional, Set
from torch_geometric.typing import Adj, Size
import pdb
from torch import Tensor
from jinja2 import Template
from torch_sparse import SparseTensor
from torch_scatter import gather_csr, scatter, segment_csr
from .layers import HyperConv, HyperSAGPool, AttHyperConv, AttHypergraphConv
from torchvision import models
import torch.nn as nn

class SAGPoolh(torch.nn.Module):
    """Hypergraph classifier: one HyperConv + two HypergraphConv layers whose
    per-layer readouts (macro-only mean pool concatenated with whole-graph
    mean pool) are summed and fed through an MLP head.

    Bug fix: the conv layers were wrapped in ``nn.Sequential`` and invoked
    with a tuple argument (e.g. ``self.conv2((x, edge_index))``).
    ``nn.Sequential`` forwards that tuple as the *single* input of
    ``HypergraphConv``, whose ``forward(x, hyperedge_index)`` cannot unpack
    it, so the original ``forward`` crashed. The convs are now plain modules
    and the LeakyReLU is applied functionally, matching the sibling models
    (``ANet``, ``ResNet``) in this file.
    """

    def __init__(self, args):
        super(SAGPoolh, self).__init__()
        self.args = args
        self.num_features = args.num_features
        self.nhid = args.nhid
        self.num_classes = args.num_classes
        self.use_real_pos = args.use_real_pos
        self.negative_slope = 0.1
        self.pooling_ratio = args.pooling_ratio
        self.dropout_ratio = args.dropout_ratio

        # Plain conv modules; the activation lives in forward() because graph
        # convs need (x, edge_index, ...) which nn.Sequential cannot route.
        self.conv1 = HyperConv(self.num_features, self.nhid)
        self.conv2 = HypergraphConv(self.nhid, self.nhid)
        self.conv3 = HypergraphConv(self.nhid, self.nhid)

        self.mlp = nn.Sequential(
            nn.Linear(self.nhid * 2, self.nhid),
            nn.LeakyReLU(negative_slope=self.negative_slope),
            nn.Dropout(p=self.dropout_ratio),
            nn.Linear(self.nhid, self.nhid // 2),
            nn.LeakyReLU(negative_slope=self.negative_slope),
            nn.Linear(self.nhid // 2, self.num_classes),
        )

    def forward(self, data):
        x, edge_index, pin_feature, batch = data.x, data.edge_index, data.pin_feature, data.batch
        # Real placement coordinates vs. synthetic ones, chosen by config.
        fake_pos = data.cell_pos if self.use_real_pos else data.fake_pos
        macro_index = data.macro_index
        # Per-graph batch ids restricted to macro nodes, for the macro readout.
        macro_batch = batch[macro_index]

        # Binary indicator column marking macro cells.
        ismacro = x.new_zeros((x.shape[0], 1))
        ismacro[macro_index] = 1

        x = torch.cat((x, fake_pos, ismacro), dim=-1)

        x = F.leaky_relu(self.conv1(x, edge_index, pin_feature), negative_slope=self.negative_slope)
        x1 = torch.cat([gap(x[macro_index], macro_batch), gap(x, batch)], dim=1)

        x = F.leaky_relu(self.conv2(x, edge_index), negative_slope=self.negative_slope)
        x2 = torch.cat([gap(x[macro_index], macro_batch), gap(x, batch)], dim=1)

        x = F.leaky_relu(self.conv3(x, edge_index), negative_slope=self.negative_slope)
        x3 = torch.cat([gap(x[macro_index], macro_batch), gap(x, batch)], dim=1)

        # Sum of the per-layer readouts (jumping-knowledge style).
        x = x1 + x2 + x3

        x = self.mlp(x)
        return x

class SAGPoolh_feature(torch.nn.Module):
    """Graph-side feature extractor: HyperConv followed by two HypergraphConv
    layers, with a max+mean pooled readout after each conv; the three
    readouts are summed into a single (batch, 2*nhid) embedding.
    """

    def __init__(self, args):
        super(SAGPoolh_feature, self).__init__()
        self.args = args
        self.num_features = args.num_features
        self.nhid = args.nhid
        self.num_classes = args.num_classes
        self.pooling_ratio = args.pooling_ratio
        self.dropout_ratio = args.dropout_ratio

        width = self.nhid
        self.conv1 = HyperConv(self.num_features, width)
        # Three Linear+ReLU stages between conv layers.
        self.mlp1 = self._linear_stack(width, 3)
        self.conv2 = HypergraphConv(width, width)
        self.mlp2 = self._linear_stack(width, 3)
        self.conv3 = HypergraphConv(width, width)

    @staticmethod
    def _linear_stack(width, depth):
        """Build ``depth`` repetitions of Linear(width, width) + ReLU."""
        layers = []
        for _ in range(depth):
            layers.append(nn.Linear(width, width))
            layers.append(nn.ReLU(True))
        return nn.Sequential(*layers)

    def forward(self, data):
        x, edge_index, pin_feature = data.x, data.edge_index, data.pin_feature
        batch, macro_index, macro_pos = data.batch, data.macro_index, data.macro_pos

        # Append macro positions plus a constant-1 column to the macro rows;
        # all other rows keep zeros in those columns.
        ones = macro_pos.new_ones((macro_pos.shape[0], 1))
        macro_pos = torch.cat((macro_pos, ones), dim=-1)
        pos = x.new_zeros((x.shape[0], macro_pos.shape[1]))
        pos[macro_index] = macro_pos
        x = torch.cat((x, pos), dim=-1)

        def readout(h):
            # Global max- and mean-pool per graph, concatenated.
            return torch.cat([gmp(h, batch), gap(h, batch)], dim=1)

        x = F.relu(self.conv1(x, edge_index, pin_feature))
        r1 = readout(x)
        x = self.mlp1(x)

        x = F.relu(self.conv2(x, edge_index))
        r2 = readout(x)
        x = self.mlp2(x)

        x = F.relu(self.conv3(x, edge_index))
        r3 = readout(x)

        return r1 + r2 + r3

class SAGPoolh_comb(torch.nn.Module):
    """Two-branch model: graph embedding from ``SAGPoolh_feature`` fused with
    a VGG16 image embedding, then an MLP produces the class scores.
    """

    def __init__(self, args):
        super(SAGPoolh_comb, self).__init__()
        self.args = args
        self.num_features = args.num_features
        self.nhid = args.nhid
        self.num_classes = args.num_classes
        self.pooling_ratio = args.pooling_ratio
        self.dropout_ratio = args.dropout_ratio

        self.graph_feature = SAGPoolh_feature(args=args)
        # Pretrained VGG16 backbone; classifier head replaced so the image
        # branch emits a 2*nhid feature vector matching the graph branch.
        self.photo_feature = models.vgg16(pretrained=True)
        self.photo_feature.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, self.nhid * 2),
        )
        self.predict = nn.Sequential(
            nn.Linear(self.nhid * 4, self.nhid),
            nn.ReLU(True),
            nn.Dropout(p=self.dropout_ratio),
            nn.Linear(self.nhid, self.nhid // 2),
            nn.ReLU(True),
            nn.Dropout(p=self.dropout_ratio),
            nn.Linear(self.nhid // 2, self.num_classes),
        )

    def forward(self, data):
        # Replicate the picture along the channel dim to feed 3-channel VGG
        # (assumes data.picture is single-channel — TODO confirm with caller).
        picture = torch.cat([data.picture] * 3, dim=1)

        graph_emb = self.graph_feature(data)
        photo_emb = self.photo_feature(picture)

        fused = torch.cat([graph_emb, photo_emb], dim=1)
        return self.predict(fused)

class MLPlus(torch.nn.Module):
    """Graph-level regressor: HyperConv + two HypergraphConv layers with
    Linear+ReLU stages in between; per-layer max+mean readouts are summed
    and passed through a regression head. Returns a flattened 1-D tensor.
    """

    def __init__(self, args):
        super(MLPlus, self).__init__()
        self.args = args
        self.num_features = args.num_features
        self.nhid = args.nhid
        self.num_classes = args.num_classes
        self.pooling_ratio = args.pooling_ratio
        self.dropout_ratio = args.dropout_ratio

        width = self.nhid
        self.conv1 = HyperConv(self.num_features, width)
        self.mlp1 = nn.Sequential(nn.Linear(width, width), nn.ReLU(True))
        self.conv2 = HypergraphConv(width, width)
        self.mlp2 = nn.Sequential(nn.Linear(width, width), nn.ReLU(True))
        self.conv3 = HypergraphConv(width, width)

        # Regression head over the summed readouts (2*nhid wide).
        self.regress = nn.Sequential(
            nn.Linear(width * 2, width),
            nn.Dropout(p=self.dropout_ratio),
            nn.ReLU(True),
            nn.Linear(width, width // 2),
            nn.Dropout(p=self.dropout_ratio),
            nn.ReLU(True),
            nn.Linear(width // 2, self.num_classes),
        )

    def forward(self, data):
        x, edge_index, pin_feature = data.x, data.edge_index, data.pin_feature
        batch, macro_index, macro_pos = data.batch, data.macro_index, data.macro_pos

        # Scatter macro positions (plus a constant-1 column) into a zero
        # tensor so non-macro rows carry zeros in those columns.
        flag = macro_pos.new_ones((macro_pos.shape[0], 1))
        macro_pos = torch.cat((macro_pos, flag), dim=-1)
        pos = x.new_zeros((x.shape[0], macro_pos.shape[1]))
        pos[macro_index] = macro_pos
        x = torch.cat((x, pos), dim=-1)

        def readout(h):
            # Global max- and mean-pool per graph, concatenated.
            return torch.cat([gmp(h, batch), gap(h, batch)], dim=1)

        x = F.relu(self.conv1(x, edge_index, pin_feature))
        r1 = readout(x)
        x = self.mlp1(x)

        x = F.relu(self.conv2(x, edge_index))
        r2 = readout(x)
        x = self.mlp2(x)

        x = F.relu(self.conv3(x, edge_index))
        r3 = readout(x)

        out = self.regress(r1 + r2 + r3)
        return out.view(-1)

class CNN(torch.nn.Module):
    """Image-only baseline: pretrained VGG16 with a replaced classifier head
    that maps the 512*7*7 feature map down to ``num_classes`` scores.
    """

    def __init__(self, args):
        super(CNN, self).__init__()
        self.args = args
        self.num_classes = args.num_classes

        self.net = models.vgg16(pretrained=True)
        head = [
            nn.Linear(512 * 7 * 7, 512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 128),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(128, self.num_classes),
        ]
        self.net.classifier = nn.Sequential(*head)

    def forward(self, data):
        # The picture tensor goes straight through the backbone + new head.
        return self.net(data.picture)

class ANet(torch.nn.Module):
    """Classifier built from (optionally attention-enabled) hypergraph convs.
    After each conv, the readout concatenates a macro-only mean pool with a
    whole-graph mean pool; the three readouts are summed before the MLP head.
    """

    def __init__(self, args):
        super(ANet, self).__init__()
        self.args = args
        self.num_features = args.num_features
        self.nhid = args.nhid
        self.num_classes = args.num_classes
        self.use_real_pos = args.use_real_pos
        self.attention = args.attention
        self.negative_slope = 0.1
        self.pooling_ratio = args.pooling_ratio
        self.dropout_ratio = args.dropout_ratio

        use_att = self.attention
        self.conv1 = AttHyperConv(self.num_features, self.nhid, use_attention=use_att)
        self.conv2 = AttHypergraphConv(self.nhid, self.nhid, use_attention=use_att)
        self.conv3 = AttHypergraphConv(self.nhid, self.nhid, use_attention=use_att)

        self.mlp = nn.Sequential(
            nn.Linear(self.nhid * 2, self.nhid),
            nn.LeakyReLU(negative_slope=self.negative_slope),
            nn.Dropout(p=self.dropout_ratio),
            nn.Linear(self.nhid, self.nhid // 2),
            nn.LeakyReLU(negative_slope=self.negative_slope),
            nn.Linear(self.nhid // 2, self.num_classes),
        )

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        pin_feature, batch = data.pin_feature, data.batch
        # Real placement coordinates vs. synthetic ones, chosen by config.
        pos = data.cell_pos if self.use_real_pos else data.fake_pos
        macro_index = data.macro_index
        macro_batch = batch[macro_index]

        # Binary indicator column marking macro cells.
        is_macro = x.new_zeros((x.shape[0], 1))
        is_macro[macro_index] = 1
        x = torch.cat((x, pos, is_macro), dim=-1)

        slope = self.negative_slope

        def readout(h):
            return torch.cat([gap(h[macro_index], macro_batch), gap(h, batch)], dim=1)

        x = F.leaky_relu(self.conv1(x, edge_index, pin_feature), negative_slope=slope)
        r1 = readout(x)

        x = F.leaky_relu(self.conv2(x, edge_index), negative_slope=slope)
        r2 = readout(x)

        x = F.leaky_relu(self.conv3(x, edge_index), negative_slope=slope)
        r3 = readout(x)

        return self.mlp(r1 + r2 + r3)

class ResNet(torch.nn.Module):
    """Residual variant of the hypergraph classifier: convs 2-4 add their
    input back before the activation, and every stage contributes a
    macro+global mean-pool readout; the four readouts are summed for the MLP.
    """

    def __init__(self, args):
        super(ResNet, self).__init__()
        self.args = args
        self.num_features = args.num_features
        self.nhid = args.nhid
        self.num_classes = args.num_classes
        self.use_real_pos = args.use_real_pos
        self.negative_slope = 0.1
        self.pooling_ratio = args.pooling_ratio
        self.dropout_ratio = args.dropout_ratio

        self.conv1 = HyperConv(self.num_features, self.nhid)
        self.conv2 = HypergraphConv(self.nhid, self.nhid)
        self.conv3 = HypergraphConv(self.nhid, self.nhid)
        self.conv4 = HypergraphConv(self.nhid, self.nhid)

        self.mlp = nn.Sequential(
            nn.Linear(self.nhid * 2, self.nhid),
            nn.LeakyReLU(negative_slope=self.negative_slope),
            nn.Dropout(p=self.dropout_ratio),
            nn.Linear(self.nhid, self.nhid // 2),
            nn.LeakyReLU(negative_slope=self.negative_slope),
            nn.Linear(self.nhid // 2, self.num_classes),
        )

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        pin_feature, batch = data.pin_feature, data.batch
        pos = data.cell_pos if self.use_real_pos else data.fake_pos
        macro_index = data.macro_index
        macro_batch = batch[macro_index]

        # Binary indicator column marking macro cells.
        is_macro = x.new_zeros((x.shape[0], 1))
        is_macro[macro_index] = 1
        x = torch.cat((x, pos, is_macro), dim=-1)

        slope = self.negative_slope

        def readout(h):
            return torch.cat([gap(h[macro_index], macro_batch), gap(h, batch)], dim=1)

        x = F.leaky_relu(self.conv1(x, edge_index, pin_feature), negative_slope=slope)
        readouts = [readout(x)]

        # Residual hypergraph conv stages: pre-activation skip connection.
        for conv in (self.conv2, self.conv3, self.conv4):
            x = F.leaky_relu(conv(x, edge_index) + x, negative_slope=slope)
            readouts.append(readout(x))

        return self.mlp(sum(readouts))

class PosNet(torch.nn.Module):
    """Per-node predictor: three conv layers followed by a three-layer Linear
    head with dropout. No graph-level pooling — the output keeps one row per
    node.
    """

    def __init__(self, args):
        super(PosNet, self).__init__()
        self.args = args
        self.num_features = args.num_features
        self.nhid = args.nhid
        self.num_classes = args.num_classes

        self.pooling_ratio = args.pooling_ratio
        self.dropout_ratio = args.dropout_ratio

        self.conv1 = HyperConv(self.num_features, self.nhid)
        self.conv2 = HypergraphConv(self.nhid, self.nhid)
        self.conv3 = HypergraphConv(self.nhid, self.nhid)
        self.lin1 = torch.nn.Linear(self.nhid, self.nhid // 2)
        self.lin2 = torch.nn.Linear(self.nhid // 2, self.nhid // 8)
        self.lin3 = torch.nn.Linear(self.nhid // 8, self.num_classes)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        pin_feature, batch = data.pin_feature, data.batch
        fake_pos, macro_index = data.fake_pos, data.macro_index
        # Kept for parity with the sibling models; not used below.
        macro_batch = batch[macro_index]

        # Binary indicator column marking macro cells.
        is_macro = x.new_zeros((x.shape[0], 1))
        is_macro[macro_index] = 1
        x = torch.cat((x, fake_pos, is_macro), dim=-1)

        x = F.relu(self.conv1(x, edge_index, pin_feature))
        x = F.relu(self.conv2(x, edge_index))
        x = F.relu(self.conv3(x, edge_index))

        x = F.dropout(F.relu(self.lin1(x)), p=self.dropout_ratio, training=self.training)
        x = F.dropout(F.relu(self.lin2(x)), p=self.dropout_ratio, training=self.training)
        return self.lin3(x)

class WLNet(torch.nn.Module):
    """Predictor over real cell positions (``data.cell_pos``): three
    hypergraph convs with macro+global mean-pool readouts summed, then a
    three-layer Linear head with dropout.

    Bug fix: ``__init__`` previously called ``super(ANet, self).__init__()``
    (a copy-paste from ``ANet``). Since WLNet is not a subclass of ANet,
    every instantiation raised ``TypeError: super(type, obj): obj must be an
    instance or subtype of type``. Corrected to ``super(WLNet, self)``.
    """

    def __init__(self, args):
        super(WLNet, self).__init__()  # was: super(ANet, self).__init__()
        self.args = args
        self.num_features = args.num_features
        self.nhid = args.nhid
        self.num_classes = args.num_classes

        self.pooling_ratio = args.pooling_ratio
        self.dropout_ratio = args.dropout_ratio

        self.conv1 = HyperConv(self.num_features, self.nhid)
        self.conv2 = HypergraphConv(self.nhid, self.nhid)
        self.conv3 = HypergraphConv(self.nhid, self.nhid)

        self.lin1 = torch.nn.Linear(self.nhid*2, self.nhid)
        self.lin2 = torch.nn.Linear(self.nhid, self.nhid//2)
        self.lin3 = torch.nn.Linear(self.nhid//2, self.num_classes)

    def forward(self, data):

        x, edge_index = data.x, data.edge_index
        pin_feature, batch = data.pin_feature, data.batch
        macro_index, cell_pos = data.macro_index, data.cell_pos
        # Per-graph batch ids restricted to macro nodes, for the macro readout.
        macro_batch = batch[macro_index]

        # Binary indicator column marking macro cells.
        ismacro = x.new_zeros((x.shape[0],1))
        ismacro[macro_index] = 1

        x = torch.cat((x,cell_pos,ismacro),dim=-1)

        x = F.relu(self.conv1(x, edge_index, pin_feature))

        x1 = torch.cat([gap(x[macro_index], macro_batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))

        x2 = torch.cat([gap(x[macro_index], macro_batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))

        x3 = torch.cat([gap(x[macro_index], macro_batch), gap(x, batch)], dim=1)

        # Sum of the per-layer readouts (jumping-knowledge style).
        x = x1 + x2 + x3

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout_ratio, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.dropout(x, p=self.dropout_ratio, training=self.training)
        x = self.lin3(x)
        return x