from torch_geometric.nn import SAGEConv, GCNConv, HypergraphConv
from torch_geometric.nn.pool.topk_pool import topk,filter_adj
import torch
from typing import Optional
from datasets import MyDataset, DesignSet
from torch import Tensor
from torch.nn import Parameter
import torch.nn.functional as F
from torch_scatter import scatter_add, scatter
from torch_geometric.utils import softmax
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.data import DataLoader
from tqdm import tqdm
from torch.utils.data import random_split, Subset, ConcatDataset
import pylab
import os.path as osp
import numpy as np
def diameter(hyperedge_index, device=None):
    """Print 90%-coverage BFS round counts over a hypergraph.

    For every start node, a reachability mask is propagated one hyperedge
    hop per round until at least 90% of all nodes are covered.  The
    smallest round count seen over all start nodes is tracked and printed.

    NOTE(review): despite the function name, ``maxx`` tracks the *minimum*
    round count over sources (radius-like), not the maximum (diameter) —
    confirm which statistic was intended.

    Args:
        hyperedge_index: LongTensor of shape (2, num_pins); row 0 holds
            node ids, row 1 hyperedge ids (torch_geometric hypergraph form).
        device: torch device for the working buffers; defaults to the
            device of ``hyperedge_index`` (the original code assumed CUDA).

    Returns:
        The tracked minimum round count (also printed, as before).
    """
    if device is None:
        device = hyperedge_index.device
    node_ids, edge_ids = hyperedge_index[0], hyperedge_index[1]
    num_nodes = int(node_ids.max()) + 1
    num_edges = int(edge_ids.max()) + 1

    # Cap on BFS rounds per source; doubles as the running minimum.
    maxx = 1000
    # tqdm progress wrapper dropped so the function depends only on torch.
    for i in range(num_nodes):
        vec = torch.zeros(num_nodes, dtype=torch.long, device=device)
        vec[i] = 1
        cnt = 0
        while vec.sum() / num_nodes <= 0.9:
            # node mask -> hyperedge mask (edge is on if any member is on)
            edge_vec = torch.zeros(num_edges, dtype=torch.long, device=device)
            edge_vec.scatter_reduce_(0, edge_ids, vec[node_ids], reduce='amax')
            # hyperedge mask -> node mask.
            # BUG FIX: the original scattered back onto *edges*
            # (index=hyperedge_index[1], dim_size=num_edges), which breaks
            # the node-count termination test and can index out of range on
            # the next round; scatter onto nodes instead, mirroring
            # the pattern used by k_shortest().
            new_vec = torch.zeros(num_nodes, dtype=torch.long, device=device)
            new_vec.scatter_reduce_(0, node_ids, edge_vec[edge_ids], reduce='amax')
            vec = new_vec
            cnt += 1
            if cnt > maxx:
                break
        if cnt < maxx:
            maxx = cnt
            print(maxx)

    print(maxx)
    return maxx


def k_shortest(hyperedge_index, macro_index, device=None):
    """Hop distances from every macro to every node of a hypergraph.

    Runs one BFS per macro, where a single step moves between nodes that
    share a hyperedge.

    Args:
        hyperedge_index: LongTensor of shape (2, num_pins); row 0 holds
            node ids, row 1 hyperedge ids.
        macro_index: indexable sequence of macro node ids (one BFS each).
        device: device for the working buffers; defaults to the device of
            ``hyperedge_index`` (the original hard-coded 'cuda:1').

    Returns:
        LongTensor of shape (num_nodes, num_macros); entry (v, k) is the
        hop distance from macro k to node v.  Unreachable nodes get the
        sentinel value ``num_nodes + num_edges`` (larger than any real
        path).  NOTE(review): a macro's distance to itself is set to 10,
        not 0 — presumably to keep later 1/distance weights finite;
        confirm before relying on exact self-distances.
    """
    if device is None:
        device = hyperedge_index.device
    node_ids, edge_ids = hyperedge_index[0], hyperedge_index[1]
    macro_num = int(len(macro_index))
    node_num = int(node_ids.max()) + 1
    edge_num = int(edge_ids.max()) + 1
    shortest_length = []
    for k in range(macro_num):
        macro_id = macro_index[k]
        steps = torch.zeros(node_num, dtype=torch.long, device=device)
        visited = torch.zeros(node_num, dtype=torch.long, device=device)
        visited[macro_id] = 1
        steps[macro_id] = 10  # see NOTE(review) in the docstring
        # mask of nodes discovered in the latest round
        new_node = torch.zeros(node_num, dtype=torch.long, device=device)
        new_node[macro_id] = 1
        cnt = 0
        while torch.sum(new_node) > 0:
            cnt += 1
            # one BFS round: node mask -> hyperedge mask -> node mask
            edge_mask = torch.zeros(edge_num, dtype=torch.long, device=device)
            edge_mask.scatter_reduce_(0, edge_ids, visited[node_ids], reduce='amax')
            reached = torch.zeros(node_num, dtype=torch.long, device=device)
            reached.scatter_reduce_(0, node_ids, edge_mask[edge_ids], reduce='amax')
            new_node = (reached - visited).long()
            steps[new_node.bool()] = cnt
            visited = reached
        # nodes never visited get an "infinite" sentinel distance
        steps = torch.where(visited == 0, node_num + edge_num, steps)
        shortest_length.append(steps.view(node_num, 1))
    return torch.cat(shortest_length, dim=1)
        

def getset(dataset, design, train_ratio=0.8):
    """Split *dataset* into training and testing subsets.

    When ``design == 'all'``, a fixed list of designs forms the training
    set and another fixed list forms the test set.  Otherwise the files of
    the single requested design are split randomly by *train_ratio*.

    Returns (train_set, test_set, num_training, num_testing).
    """
    def design_subset(name):
        # contiguous slice of the dataset belonging to one design
        start = dataset.ptr[name]
        return Subset(dataset, range(start, start + dataset.file_num[name]))

    if design == 'all':
        train_designs = ['des_perf_a', 'edit_dist_a', 'fft_a', 'fft_b', 'matrix_mult_a']
        test_designs = ['matrix_mult_b', 'matrix_mult_c', 'pci_bridge32_b']
        num_training = sum(dataset.file_num[name] for name in train_designs)
        num_testing = sum(dataset.file_num[name] for name in test_designs)
        train_set = ConcatDataset([design_subset(name) for name in train_designs])
        test_set = ConcatDataset([design_subset(name) for name in test_designs])
    else:
        num_training = int(dataset.file_num[design] * train_ratio)
        num_testing = dataset.file_num[design] - num_training
        train_set, test_set = random_split(design_subset(design),
                                           [num_training, num_testing])
    print("Total %d training data, %d testing data."%(num_training,num_testing),flush=True)
    return train_set, test_set, num_training, num_testing


def label_stat(dataset, design):
    """Save a histogram of the labels of all samples of *design* to stat/."""
    # train_ratio=1 puts every file of the design into the "train" subset
    train, _, count, _ = getset(dataset=dataset, design=design, train_ratio=1)
    labels = [float(sample.y) for sample in train]
    pylab.hist(labels, dataset.classes)
    pylab.xlabel('Range')
    pylab.ylabel('Count')
    pylab.savefig('stat/{}.png'.format(design))
    # clear the axes so the next design starts from an empty figure
    pylab.cla()


def standardization(x):
    """Return *x* shifted and scaled to zero mean and unit (sample) std."""
    centered = x - x.mean()
    return centered / x.std()

def normalization(x):
    """Linearly rescale *x* so its minimum maps to 0 and its maximum to 1."""
    low, high = torch.min(x), torch.max(x)
    return (x - low) / (high - low)

def pre_norm(data):
    """Standardize selected features of a placement-graph sample in place.

    Standardizes the first two columns of ``data.x`` and the density
    ``data.picture``.  Mutates *data*; returns nothing.
    """
    x = data.x
    # standardize the first two node-feature columns (written back in place)
    x[:, 0], x[:, 1] = standardization(x[:, 0]), standardization(x[:, 1])
    # BUG FIX: the original did `picture = standardization(picture)`, which
    # only rebound a local name — data.picture was never updated.  Assign
    # the standardized tensor back onto the data object.
    data.picture = standardization(data.picture)


def plot(labels, name):
    """Save a 20-bin histogram of *labels* to '<name>.png'.

    Args:
        labels: sequence of numeric values to histogram.
        name: output path stem; the figure is written to '{name}.png'.
    """
    pylab.hist(labels, 20)
    pylab.xlabel('Range')
    pylab.ylabel('Count')
    pylab.savefig('{}.png'.format(name))
    # FIX (consistency with label_stat): clear the axes afterwards so
    # successive calls do not pile histograms on top of each other.
    pylab.cla()


def GasussianKernel(size=21, sigma=None):
    """Build a square, unnormalized Gaussian kernel.

    (The 'Gasussian' typo is kept in the name for API compatibility.)

    Args:
        size: kernel side length; even values are bumped to the next odd
            number so the kernel has a single center cell.
        sigma: Gaussian standard deviation.  Defaults to sqrt(0.5), which
            reproduces the original ``exp(-dx^2 - dy^2)`` weighting.

    Returns:
        (size, size) float32 ndarray with value 1.0 at the center.
    """
    if size % 2 == 0:
        size = size + 1
    if sigma is None:
        sigma = np.sqrt(0.5)  # 2*sigma^2 == 1 -> original exp(-d^2) form

    center = size // 2
    # vectorized squared distance from the center (replaces the O(size^2)
    # Python double loop with one broadcasted numpy expression)
    d = np.arange(size, dtype=np.float32) - center
    dist2 = d[:, None] ** 2 + d[None, :] ** 2
    return np.exp(-dist2 / (2.0 * sigma ** 2)).astype(np.float32)


# --- Script entry: precompute macro hop-distances for every design ---------
# For each design, loads its first graph sample, runs a per-macro BFS over
# the hypergraph (k_shortest) and dumps the (num_nodes, num_macros)
# hop-distance matrix as a text file next to the raw data.
root = 'data'
sets = DesignSet(root)
print(sets.file_num)
for design in tqdm(sets.raw_file_names):
    #label_stat(sets,design)
    # first stored sample of this design
    # NOTE(review): hard-coded to GPU 'cuda:1' — confirm on machines with a
    # different GPU layout.
    ptr = sets.ptr[design]
    data = sets[ptr].to('cuda:1')
    hyperedge_index, macro_index = data.edge_index, data.macro_index
    shortest = k_shortest(hyperedge_index,macro_index)
    #print(shortest)
    #weight = 1/shortest
    #summ = weight.sum(dim=-1).view(-1,1).repeat(1,int(len(macro_index)))
    #weight = weight/summ
    #pos = torch.matmul(weight,macro_pos)
    #pos[macro_index] = macro_pos
    # persist: one row per node, one column per macro
    np.savetxt('{}/raw/{}/dist2macro.txt'.format(root,design),shortest.cpu().numpy())


