import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, global_max_pool as gmp


# GCN based model
class GCNNet(torch.nn.Module):
    """Two-branch drug-target model.

    The drug is encoded from its SMILES molecular graph by a stack of three
    GCN layers followed by global max pooling; the protein target is encoded
    from its integer-tokenised sequence by an embedding plus a 1-D
    convolution.  Both fixed-size representations are concatenated and passed
    through dense layers.  ``forward`` returns raw (unactivated) logits.
    """

    def __init__(self, n_output=2, n_filters=32, embed_dim=128,
                 num_features_xd=78, num_features_xt=25, output_dim=128,
                 dropout=0.2):
        """
        :param n_output: size of the final output layer
        :param n_filters: number of filters in the protein 1-D convolution
        :param embed_dim: embedding dimension of protein sequence tokens
        :param num_features_xd: number of input node features of the drug graph
        :param num_features_xt: protein token vocabulary size (0 is reserved
            for padding, hence ``num_features_xt + 1`` embeddings)
        :param output_dim: size of each branch's final representation
        :param dropout: dropout probability used throughout the network
        """
        super(GCNNet, self).__init__()

        self.n_output = n_output

        # SMILES graph branch: three GCN layers progressively widening the
        # node feature dimension (d -> d -> 2d -> 4d).
        self.conv1 = GCNConv(num_features_xd, num_features_xd)
        self.conv2 = GCNConv(num_features_xd, num_features_xd * 2)
        self.conv3 = GCNConv(num_features_xd * 2, num_features_xd * 4)
        # Project the max-pooled graph representation down to output_dim.
        self.fc_g1 = torch.nn.Linear(num_features_xd * 4, 1024)
        self.fc_g2 = torch.nn.Linear(1024, output_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

        # Protein sequence branch (embedding + 1-D convolution).
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        # NOTE(review): in_channels=1000 means the convolution slides over the
        # embedding axis and assumes a fixed token sequence length of 1000 —
        # confirm against the data pipeline.
        kernel_size = 8
        self.conv_xt_1 = nn.Conv1d(in_channels=1000, out_channels=n_filters,
                                   kernel_size=kernel_size)
        # Flattened conv output: n_filters * (embed_dim - kernel_size + 1).
        # Equals the previously hard-coded 32*121 for the default arguments.
        self._xt_flat_dim = n_filters * (embed_dim - kernel_size + 1)
        self.fc1_xt = nn.Linear(self._xt_flat_dim, output_dim)

        # Combined dense layers over the concatenated branch outputs.
        self.fc1 = nn.Linear(2 * output_dim, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.out = nn.Linear(512, self.n_output)

        # Kept for callers that apply a sigmoid to the raw logits;
        # forward() itself does not use it.
        self.sigmoid = nn.Sigmoid()

    def forward(self, data):
        """Encode drug graph and protein sequence, combine, and predict.

        :param data: a torch_geometric batch providing node features ``x``,
            connectivity ``edge_index``, graph assignment ``batch`` and the
            tokenised protein sequence ``target``
        :return: raw logits of shape (batch, n_output)
        """
        # Drug graph inputs.
        x, edge_index, batch = data.x, data.edge_index, data.batch
        # Protein sequence input.
        target = data.target

        # Graph branch: three GCN layers with ReLU, then global max pooling
        # to obtain one vector per graph.
        x = self.relu(self.conv1(x, edge_index))
        x = self.relu(self.conv2(x, edge_index))
        x = self.relu(self.conv3(x, edge_index))
        x = gmp(x, batch)       # global max pooling

        x = self.relu(self.fc_g1(x))
        x = self.dropout(x)
        x = self.fc_g2(x)
        x = self.dropout(x)

        # Protein branch: embed tokens, apply a single 1-D convolution,
        # flatten, and project to output_dim.
        embedded_xt = self.embedding_xt(target)
        conv_xt = self.conv_xt_1(embedded_xt)
        xt = conv_xt.view(conv_xt.size(0), self._xt_flat_dim)
        xt = self.fc1_xt(xt)

        # Fuse both branches and apply the dense head.
        xc = torch.cat((x, xt), 1)
        xc = self.relu(self.fc1(xc))
        xc = self.dropout(xc)
        xc = self.relu(self.fc2(xc))
        xc = self.dropout(xc)
        out = self.out(xc)
        return out