"""
MolTrans model - Double Towers
"""

import copy
import math

import numpy as np
import paddle
import paddle.io as Data
import paddle.nn.functional as F
from paddle import nn

from helper import utils


# Set random seeds for reproducibility
paddle.seed(2)
np.random.seed(3)


class MolTransModel(nn.Sequential):
    """
    MolTrans double-tower interaction model.

    Embeds drug and target token sequences, encodes each with a Transformer
    encoder stack, combines the two towers into a drug x target interaction
    map, convolves the map, and decodes it to a single affinity score.
    """
    def __init__(self, model_config):
        """
        Build the model from a configuration dict.

        Args:
            model_config (dict): hyper-parameters; the required keys are
                read into attributes below.
        """
        super(MolTransModel, self).__init__()
        # Basic configuration
        self.model_config = model_config
        self.drug_max_seq = model_config['drug_max_seq']          # max drug sequence length
        self.target_max_seq = model_config['target_max_seq']      # max target sequence length
        self.emb_size = model_config['emb_size']                  # embedding dimension
        self.dropout_ratio = model_config['dropout_ratio']        # dropout probability
        self.input_drug_dim = model_config['input_drug_dim']      # drug vocabulary size
        self.input_target_dim = model_config['input_target_dim']  # target vocabulary size
        self.layer_size = model_config['layer_size']              # number of encoder layers
        self.gpus = utils.device_count()                          # number of visible devices

        # Transformer configuration
        self.interm_size = model_config['interm_size']                            # feed-forward hidden size
        self.num_attention_heads = model_config['num_attention_heads']            # attention heads
        self.attention_dropout_ratio = model_config['attention_dropout_ratio']    # attention dropout
        self.hidden_dropout_ratio = model_config['hidden_dropout_ratio']          # hidden-layer dropout
        self.flatten_dim = model_config['flatten_dim']  # flattened interaction-map size fed to the decoder
        self.hidden_size = model_config['emb_size']     # hidden size equals embedding size

        # Embedding towers
        self.drug_emb = EnhancedEmbedding(self.input_drug_dim, self.emb_size,
                                          self.drug_max_seq, self.dropout_ratio)
        self.target_emb = EnhancedEmbedding(self.input_target_dim, self.emb_size,
                                            self.target_max_seq, self.dropout_ratio)

        # Encoder module shared by both towers
        self.encoder = EncoderModule(self.layer_size, self.hidden_size, self.interm_size,
                                     self.num_attention_heads, self.attention_dropout_ratio,
                                     self.hidden_dropout_ratio)
        # Convolution over the drug x target interaction map
        self.interaction_cnn = nn.Conv2D(1, 3, 3, padding=1)

        # Decoder MLP: flattened interaction map -> scalar score
        self.decoder = nn.Sequential(
            nn.Linear(self.flatten_dim, 512),
            nn.ReLU(),

            nn.LayerNorm(512),
            nn.Linear(512, 64),
            nn.ReLU(),

            nn.LayerNorm(64),
            nn.Linear(64, 32),
            nn.ReLU(),

            nn.Linear(32, 1)
        )

    def forward(self, d, t, d_masking, t_masking):
        """
        Double-tower forward pass.

        Args:
            d: drug token ids, shape (batch, drug_max_seq).
            t: target token ids, shape (batch, target_max_seq).
            d_masking: 1/0 validity mask for d.
            t_masking: 1/0 validity mask for t.

        Returns:
            Tensor of shape (batch, 1) with the predicted interaction score.
        """
        # Broadcastable additive attention masks: 0 where valid, -10000 where padded
        tempd_masking = d_masking.unsqueeze(1).unsqueeze(2)
        tempt_masking = t_masking.unsqueeze(1).unsqueeze(2)

        tempd_masking = (1.0 - tempd_masking) * -10000.0
        tempt_masking = (1.0 - tempt_masking) * -10000.0

        d_embedding = self.drug_emb(d)
        t_embedding = self.target_emb(t)

        # BUGFIX: Tensor.float() is a PyTorch API; Paddle uses astype('float32')
        d_encoder = self.encoder(d_embedding.astype('float32'), tempd_masking.astype('float32'))
        t_encoder = self.encoder(t_embedding.astype('float32'), tempt_masking.astype('float32'))

        # BUGFIX: Tensor.repeat(...) is a PyTorch API; Paddle uses paddle.tile
        drug_res = paddle.tile(paddle.unsqueeze(d_encoder, 2), [1, 1, self.target_max_seq, 1])
        target_res = paddle.tile(paddle.unsqueeze(t_encoder, 1), [1, self.drug_max_seq, 1, 1])

        # Element-wise interaction map between the two towers
        i_score = drug_res * target_res

        # BUGFIX: Tensor.view(...) is a PyTorch API; Paddle uses reshape
        i_scoreT = i_score.reshape([int(i_score.shape[0] / self.gpus), -1,
                                    self.drug_max_seq, self.target_max_seq])
        i_scoreT = paddle.sum(i_scoreT, axis=1)   # sum over the embedding channel
        i_scoreT = paddle.unsqueeze(i_scoreT, 1)  # add the Conv2D channel axis
        i_scoreT = F.dropout(i_scoreT, p=self.dropout_ratio)

        i_scoreT = self.interaction_cnn(i_scoreT)
        i_res = i_scoreT.reshape([int(i_scoreT.shape[0] / self.gpus), -1])
        res = self.decoder(i_res)
        return res


class EnhancedEmbedding(nn.Layer):
    """
    Token embedding plus learned positional embedding, followed by
    LayerNorm and dropout. Used for both drug and target towers.
    """
    def __init__(self, vocab_size, hidden_size, max_position_size, dropout_ratio):
        """
        Args:
            vocab_size (int): token vocabulary size.
            hidden_size (int): embedding dimension.
            max_position_size (int): maximum sequence length (positions).
            dropout_ratio (float): dropout probability.
        """
        super(EnhancedEmbedding, self).__init__()
        self.word_embedding = nn.Embedding(vocab_size, hidden_size)           # token embedding
        self.position_embedding = nn.Embedding(max_position_size, hidden_size)  # positional embedding
        self.LayerNorm = LayerNorm(hidden_size)  # custom layer normalization (defined below)
        self.dropout = nn.Dropout(dropout_ratio)

    def forward(self, input_id):
        """
        Embed token ids with their positions.

        Args:
            input_id: int tensor of token ids, shape (batch, seq_len).

        Returns:
            Tensor of shape (batch, seq_len, hidden_size).
        """
        # BUGFIX: Tensor.size(1) is a PyTorch API; in Paddle, Tensor.size is
        # the total element count, so use .shape[1] for the sequence length.
        seq_len = input_id.shape[1]
        position_id = paddle.arange(seq_len, dtype="int64")          # position indices [0, seq_len)
        position_id = position_id.unsqueeze(0).expand_as(input_id)   # broadcast to (batch, seq_len)

        word_embeddings = self.word_embedding(input_id)
        position_embeddings = self.position_embedding(position_id)

        embedding = word_embeddings + position_embeddings  # sum token and position embeddings
        embedding = self.LayerNorm(embedding)
        embedding = self.dropout(embedding)
        return embedding


class LayerNorm(nn.Layer):
    """
    Custom layer normalization over the last dimension, with learnable
    scale (gamma) and shift (beta) parameters.
    """
    def __init__(self, hidden_size, variance_epsilon=1e-12):
        """
        Args:
            hidden_size (int): size of the normalized (last) dimension.
            variance_epsilon (float): small constant for numerical stability.
        """
        super(LayerNorm, self).__init__()
        init_beta = nn.initializer.Assign(paddle.zeros([hidden_size], "float32"))
        init_gamma = nn.initializer.Assign(paddle.ones([hidden_size], "float32"))
        self.beta = paddle.create_parameter(shape=[hidden_size], dtype="float32",
                                            default_initializer=init_beta)
        self.gamma = paddle.create_parameter(shape=[hidden_size], dtype="float32",
                                             default_initializer=init_gamma)
        self.variance_epsilon = variance_epsilon

    def forward(self, x):
        """Normalize x over its last dimension, then scale and shift."""
        mean = x.mean(-1, keepdim=True)
        variance = (x - mean).pow(2).mean(-1, keepdim=True)
        normalized = (x - mean) / paddle.sqrt(variance + self.variance_epsilon)
        return self.gamma * normalized + self.beta


class EncoderModule(nn.Layer):
    """
    Stack of Transformer encoder layers applied sequentially.
    """
    def __init__(self, layer_size, hidden_size, interm_size, num_attention_heads,
                 attention_dropout_ratio, hidden_dropout_ratio):
        """
        Args:
            layer_size (int): number of encoder layers in the stack.
            hidden_size (int): model hidden dimension.
            interm_size (int): feed-forward intermediate dimension.
            num_attention_heads (int): attention heads per layer.
            attention_dropout_ratio (float): dropout on attention weights.
            hidden_dropout_ratio (float): dropout on hidden activations.
        """
        super(EncoderModule, self).__init__()
        module = Encoder(hidden_size, interm_size, num_attention_heads,
                         attention_dropout_ratio, hidden_dropout_ratio)
        # BUGFIX: the original comprehension repeated the SAME layer object,
        # so every "layer" shared one set of weights; deep-copy the prototype
        # so each layer has independent parameters.
        self.module = nn.LayerList([copy.deepcopy(module) for _ in range(layer_size)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """
        Run all encoder layers in order.

        Args:
            hidden_states: input tensor (batch, seq, hidden).
            attention_mask: additive attention mask.
            output_all_encoded_layers: kept for interface compatibility;
                only the final layer's hidden states are returned.

        Returns:
            Hidden states after the last encoder layer.
        """
        for layer_module in self.module:
            hidden_states = layer_module(hidden_states, attention_mask)

        return hidden_states

  
class Encoder(nn.Layer):
    """
    Single Transformer encoder layer: multi-head self-attention followed by
    a position-wise feed-forward network, each with residual + LayerNorm
    handled inside the sub-layers.
    """
    def __init__(self, hidden_size, interm_size, num_attention_heads, attention_dropout_ratio, hidden_dropout_ratio):
        """Build the attention, intermediate, and output sub-layers."""
        super(Encoder, self).__init__()
        self.attention = Attention(hidden_size, num_attention_heads,
                                   attention_dropout_ratio, hidden_dropout_ratio)
        self.latent = LatentModule(hidden_size, interm_size)
        self.output = Output(interm_size, hidden_size, hidden_dropout_ratio)

    def forward(self, hidden_states, attention_mask):
        """Apply self-attention, then the feed-forward block with residual."""
        attended = self.attention(hidden_states, attention_mask)
        expanded = self.latent(attended)
        return self.output(expanded, attended)


class Attention(nn.Layer):
    """
    Multi-head self-attention wrapped with its output projection,
    dropout, residual connection, and LayerNorm.
    """
    def __init__(self, hidden_size, num_attention_heads, attention_dropout_ratio, hidden_dropout_ratio):
        """Build the self-attention core and its output sub-layer."""
        super(Attention, self).__init__()
        self.self = SelfAttention(hidden_size, num_attention_heads, attention_dropout_ratio)
        self.output = SelfOutput(hidden_size, hidden_dropout_ratio)

    def forward(self, input_tensor, attention_mask):
        """Attend over input_tensor, then project and fuse via residual."""
        context = self.self(input_tensor, attention_mask)
        return self.output(context, input_tensor)


class LatentModule(nn.Layer):
    """
    Position-wise feed-forward expansion: linear projection to the
    intermediate size followed by GELU activation.
    """
    def __init__(self, hidden_size, interm_size):
        """Create the hidden -> intermediate projection."""
        super(LatentModule, self).__init__()
        self.connecter = nn.Linear(hidden_size, interm_size)

    def forward(self, hidden_states):
        """Project and apply GELU."""
        return F.gelu(self.connecter(hidden_states))


class Output(nn.Layer):
    """
    Feed-forward output sub-layer: project back to the hidden size,
    apply dropout, add the residual, and normalize.
    """
    def __init__(self, interm_size, hidden_size, hidden_dropout_ratio):
        """Create the intermediate -> hidden projection, LayerNorm, dropout."""
        super(Output, self).__init__()
        self.connecter = nn.Linear(interm_size, hidden_size)
        self.LayerNorm = LayerNorm(hidden_size)
        self.dropout = nn.Dropout(hidden_dropout_ratio)

    def forward(self, hidden_states, input_tensor):
        """Project, drop out, then residual-add input_tensor and normalize."""
        projected = self.connecter(hidden_states)
        projected = self.dropout(projected)
        return self.LayerNorm(projected + input_tensor)


class SelfOutput(nn.Layer):
    """
    Attention output sub-layer: hidden -> hidden projection with dropout,
    residual connection, and LayerNorm.
    """
    def __init__(self, hidden_size, hidden_dropout_ratio):
        """Create the projection, LayerNorm, and dropout."""
        super(SelfOutput, self).__init__()
        self.connecter = nn.Linear(hidden_size, hidden_size)
        self.LayerNorm = LayerNorm(hidden_size)
        self.dropout = nn.Dropout(hidden_dropout_ratio)

    def forward(self, hidden_states, input_tensor):
        """Project, drop out, then residual-add input_tensor and normalize."""
        projected = self.connecter(hidden_states)
        projected = self.dropout(projected)
        return self.LayerNorm(projected + input_tensor)


class SelfAttention(nn.Layer):
    """
    Multi-head scaled dot-product self-attention.
    """
    def __init__(self, hidden_size, num_attention_heads, attention_dropout_ratio):
        """
        Args:
            hidden_size (int): model hidden dimension.
            num_attention_heads (int): number of attention heads.
            attention_dropout_ratio (float): dropout on attention weights.

        Raises:
            ValueError: if hidden_size is not divisible by num_attention_heads.
        """
        super(SelfAttention, self).__init__()
        if hidden_size % num_attention_heads != 0:
            # BUGFIX: message said "product"; divisibility is what is checked.
            raise ValueError(
                 "The hidden size (%d) is not a multiple of the number of attention heads (%d)" %
                 (hidden_size, num_attention_heads))
        self.num_attention_heads = num_attention_heads
        self.head_size = hidden_size // num_attention_heads
        self.all_head_size = self.num_attention_heads * self.head_size

        self.q = nn.Linear(hidden_size, self.all_head_size)
        self.k = nn.Linear(hidden_size, self.all_head_size)
        self.v = nn.Linear(hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(attention_dropout_ratio)

    def score_transpose(self, x):
        """
        Split the last dimension into heads:
        (batch, seq, all_head_size) -> (batch, heads, seq, head_size).
        """
        # BUGFIX: Tensor.size()/.view/.permute are PyTorch APIs; Paddle uses
        # .shape (a list), .reshape, and .transpose(perm=...).
        new_shape = x.shape[:-1] + [self.num_attention_heads, self.head_size]
        x = x.reshape(new_shape)
        return x.transpose([0, 2, 1, 3])

    def forward(self, hidden_states, attention_mask):
        """
        Compute multi-head attention over hidden_states.

        Args:
            hidden_states: (batch, seq, hidden) input.
            attention_mask: additive mask broadcastable to the score tensor
                (0 for valid positions, large negative for padding).

        Returns:
            Context tensor of shape (batch, seq, all_head_size).
        """
        q_layer = self.score_transpose(self.q(hidden_states))
        k_layer = self.score_transpose(self.k(hidden_states))
        v_layer = self.score_transpose(self.v(hidden_states))

        # BUGFIX: k.transpose(-1, -2) is a PyTorch call signature; Paddle's
        # matmul supports transpose_y=True for Q @ K^T directly.
        attention_score = paddle.matmul(q_layer, k_layer, transpose_y=True)
        attention_score = attention_score / math.sqrt(self.head_size)  # scale by sqrt(d_k)
        attention_score = attention_score + attention_mask             # mask out padding

        attention_prob = F.softmax(attention_score, axis=-1)
        attention_prob = self.dropout(attention_prob)

        attention_layer = paddle.matmul(attention_prob, v_layer)
        # BUGFIX: .permute/.contiguous are PyTorch APIs; Paddle needs only
        # transpose(perm=...) and reshape.
        attention_layer = attention_layer.transpose([0, 2, 1, 3])
        merged_shape = attention_layer.shape[:-2] + [self.all_head_size]
        return attention_layer.reshape(merged_shape)