import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from gnn_transformer.own_transformer import Transformer
class GNNNet(torch.nn.Module):
    """Message-passing network over three entity types: nodes, links and flows.

    Each entity type is linearly embedded into a shared dimension, then
    refined for a fixed number of rounds: a per-type GRU transforms the
    features and the mean of the related entities' features is added back
    (flows aggregate the nodes/links on their path, nodes aggregate the flows
    crossing them, links aggregate their endpoint nodes).  The final flow
    features are projected to ``out_features``.
    """

    def __init__(self, flow_in_features=5, node_in_features=2, link_in_features=3,
                 gnn_out_features=20, embedding_out_dim=32, out_features=1):
        """
        :param flow_in_features: raw feature size of each flow row
        :param node_in_features: raw feature size of each node row
        :param link_in_features: raw feature size of each link row
        :param gnn_out_features: stored but currently unused by this module
        :param embedding_out_dim: shared embedding / GRU hidden size
        :param out_features: size of the per-flow prediction
        """
        super(GNNNet, self).__init__()
        self.flow_in_features = flow_in_features
        self.node_in_features = node_in_features
        self.link_in_features = link_in_features
        self.gnn_out_features = gnn_out_features
        self.out_features = out_features
        self.embedding_out_dim = embedding_out_dim

        # Project each entity type into the shared embedding dimension so the
        # additive message passing below is shape-compatible.
        self.flow_embedding = nn.Linear(in_features=self.flow_in_features, out_features=self.embedding_out_dim)
        self.node_embedding = nn.Linear(in_features=self.node_in_features, out_features=self.embedding_out_dim)
        self.link_embedding = nn.Linear(in_features=self.link_in_features, out_features=self.embedding_out_dim)

        self.flow_gru = nn.GRU(input_size=self.embedding_out_dim, hidden_size=self.embedding_out_dim)
        self.node_gru = nn.GRU(input_size=self.embedding_out_dim, hidden_size=self.embedding_out_dim)
        self.link_gru = nn.GRU(input_size=self.embedding_out_dim, hidden_size=self.embedding_out_dim)

        # Intended replacements for the GRUs; currently not used in forward()
        # (the original author noted the Transformer still needs fixing).
        self.flow_transformer = Transformer(zh_input_size=32,
                                en_input_size=32,
                                output_size=self.embedding_out_dim,
                                embedding_dim=self.embedding_out_dim,
                                layer_num=4,
                                head_num=4,
                                padding_idx=1,
                                bos_idx=2,
                                dropout=0.3,
                                max_length=100,
                                teacher_forcing_ratio=0.2)
        self.node_transformer = Transformer(zh_input_size=32,
                                en_input_size=32,
                                output_size=self.embedding_out_dim,
                                embedding_dim=self.embedding_out_dim,
                                layer_num=4,
                                head_num=4,
                                padding_idx=1,
                                bos_idx=2,
                                dropout=0.3,
                                max_length=100,
                                teacher_forcing_ratio=0.2)
        self.link_transformer = Transformer(zh_input_size=32,
                                en_input_size=32,
                                output_size=self.embedding_out_dim,
                                embedding_dim=self.embedding_out_dim,
                                layer_num=4,
                                head_num=4,
                                padding_idx=1,
                                bos_idx=2,
                                dropout=0.3,
                                max_length=100,
                                teacher_forcing_ratio=0.2)

        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.output1 = nn.Linear(in_features=self.embedding_out_dim, out_features=self.out_features)
        # output2/output are registered but not used in forward() at present.
        self.output2 = nn.Linear(in_features=16, out_features=16)
        self.output = nn.Linear(in_features=16, out_features=self.out_features)

    @staticmethod
    def _mean_pool(features, index_lists):
        """Stack the row-wise mean of ``features[idx]`` for every index list.

        :param features: 2-D tensor of per-entity features
        :param index_lists: iterable of row-index lists, one per output row
        :return: tensor of shape (len(index_lists), features.size(1))
        """
        pooled = [torch.mean(features[idx, :], 0).unsqueeze(0) for idx in index_lists]
        return torch.cat(pooled, 0)

    def forward(self, data):
        """Run message passing and return a per-flow prediction.

        :param data: sequence [node, link, flow] of batched tensors; link rows
            start with (src, dst) node indices, flow rows list the node
            indices on the flow's path, padded with -1.
        :return: output of the final linear projection over the flow features
        """
        node = data[0]
        link = data[1]
        flow = data[2]
        # L1-normalise the raw features.
        node = F.normalize(node.float(), p=1, dim=1)
        link = F.normalize(link.float(), p=1, dim=1)
        flow = F.normalize(flow.float(), p=1, dim=1)

        # NOTE(review): the device is chosen from global CUDA availability,
        # not from this module's parameters — assumes the caller moved the
        # model to the same device; confirm.
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        node = node.to(device)
        link = link.to(device)
        flow = flow.to(device)

        # Embed every entity type into the shared dimension.
        flow = self.relu(self.flow_embedding(flow))
        node = self.relu(self.node_embedding(node))
        link = self.relu(self.link_embedding(link))

        # The relation indices depend only on the raw input, so compute them
        # once instead of once per message-passing round.
        node_index = self.get_node_by_flow(data)
        # Drop the -1 padding entries from each flow's node list.
        flow_node_index = [[i for i in idx if i != -1] for idx in node_index[0]]
        link_index = self.get_link_by_flow(data)
        flow_index = self.get_flow_by_node(data)
        link_node_index = self.get_node_by_link(data)

        for _ in range(8):  # fixed number of message-passing rounds
            flow_out, _ = self.flow_gru(flow)
            flow_gru_out = self.relu(flow_out)
            node_out, node_hidden = self.node_gru(node)
            # NOTE(review): node uses the GRU *hidden state* while flow/link
            # use the output sequence — looks unintended; confirm.
            node_gru_out = self.relu(node_hidden)
            link_out, _ = self.link_gru(link)
            link_gru_out = self.relu(link_out)

            # Flows receive the mean of the nodes and links on their path.
            added_node = self._mean_pool(node_gru_out[0], flow_node_index)
            added_link = self._mean_pool(link_gru_out[0], link_index)
            # Nodes receive the mean of the flows passing through them.
            added_flow = self._mean_pool(flow_gru_out[0], flow_index)
            # Links receive the mean of their two endpoint nodes.
            added_node_by_link = self._mean_pool(node_gru_out[0], link_node_index)

            flow = flow_gru_out + added_node + added_link
            node = node_gru_out + added_flow
            link = link_gru_out + added_node_by_link

        return self.output1(flow)

    def get_node_by_flow(self, data):
        """Return, per flow batch, each flow's node indices as lists of ints."""
        res_list = []
        for flow in data[2]:
            res_list.append([list(map(int, node_index.tolist())) for node_index in flow])
        return res_list

    def get_link_by_flow(self, data):
        """For each flow, return the row indices in data[1][0] of the links
        (consecutive node pairs) that the flow traverses."""
        res_list = []
        flows = data[2][0].tolist()
        # Drop -1 padding entries (hoisted: the original re-ran tolist() per element).
        data_flow = [f for f in flows if f != -1]
        for flow in data_flow:
            # Consecutive [src, dst] node pairs along the flow's path; safe
            # for empty/one-node flows (the original popped from an empty list).
            link_index = [flow[i:i + 2] for i in range(len(flow) - 1)]
            link_row_index = [i for i, link in enumerate(data[1][0])
                              if [int(link[0].item()), int(link[1].item())] in link_index]
            res_list.append(link_row_index)
        return res_list

    def get_flow_by_node(self, data):
        """For each node row, return the row indices of flows passing through it."""
        res_list = []
        data_node = data[0][0].tolist()
        data_flow = data[2][0].tolist()
        for node_row_index, _node in enumerate(data_node):
            res_list.append([flow_row_index
                             for flow_row_index, flow in enumerate(data_flow)
                             if node_row_index in flow])
        return res_list

    def get_node_by_link(self, data):
        """Return the [src, dst] node indices for every link row in data[1][0]."""
        return [[link[0], link[1]] for link in data[1][0].tolist()]