# -*- coding: utf-8 -*-  
'''
TPLinker component layers.

@author: luoyi
Created on 2021-06-24
'''
import tensorflow as tf

import utils.conf as conf
import utils.relationships as rel


#    Input data arrangement layer
class InputLayer(tf.keras.layers.Layer):
    '''Arranges the raw model input.
        X: Tensor(2 + max_sen_len, max_sen_len)
        The first 2 rows are the BERT input: token ids + position ids.
        The remaining max_sen_len * max_sen_len rows mark every possible
        handshake position: 1 where a handshake is possible, 0 otherwise.
    '''
    def __init__(self, 
                 name='input_layer',
                 max_sen_len=conf.TPLINKER.get_max_sentence_len(),
                 batch_size=conf.DATASET_BAIDU.get_batch_size(),
                 **kwargs):
        super(InputLayer, self).__init__(name=name, **kwargs)
        
        self._max_sen_len = max_sen_len
        self._batch_size = batch_size
    
    def build(self, input_shape):
        super(InputLayer, self).build(input_shape)
        
        #    Non-trainable stash holding the handshake template extracted in call()
        trimmed_len = self._max_sen_len - 2
        self._X_tpl = self.add_weight(name='X_tpl',
                                      shape=(self._batch_size, trimmed_len, trimmed_len),
                                      dtype=tf.int64,
                                      initializer=tf.initializers.zeros(),
                                      trainable=False)
    
    def call(self, inputs, **kwargs):
        '''Split inputs into the BERT part and the handshake template.
            inputs: Tensor(batch_size, 2 + max_sen_len, max_sen_len)
            Returns the 2-row BERT input; the template is stashed in a
            non-trainable variable and read back via get_X_tpl().
        '''
        #    Separate the BERT rows from the handshake template rows
        X_bert, X_tpl = tf.split(inputs,
                                 num_or_size_splits=[2, self._max_sen_len],
                                 axis=1)
        
        #    NOTE(review): something upstream appears to cast to float32; force back to int64
        X_tpl = tf.cast(X_tpl, dtype=tf.int64)
        #    Drop the two placeholder rows/cols reserved for [CLS]/[EOS]
        X_tpl = X_tpl[:, :-2, :-2]
        self._X_tpl.assign(X_tpl)
        return X_bert

    def get_X_tpl(self):
        #    Latest handshake template captured by call()
        return self._X_tpl.value()


#    TPLinkerLayer
class TPLinkerLayer(tf.keras.layers.Layer):
    '''Top-level TPLinker layer.

    Pipes the token vectors through the handshaking kernel, then through
    the FC prediction heads. The predictions are stashed inside the FC
    layer and read back through its getters; call() returns its inputs
    unchanged.
    '''
    def __init__(self,
                 name='tplinker_layer',
                 d_model=conf.BERT.get_d_model(),
                 max_sen_len=conf.TPLINKER.get_max_sentence_len(),
                 rel_size=len(rel.id_rel),
                 batch_size=conf.DATASET_BAIDU.get_batch_size(),
                 **kwargs):
        #    BUG FIX: `name` was passed positionally. keras.layers.Layer's first
        #    positional parameter is `trainable`, so the layer silently received
        #    trainable='tplinker_layer' and a default name. Pass it as a keyword.
        super(TPLinkerLayer, self).__init__(name=name, **kwargs)
        
        self._d_model = d_model
        self._max_sen_len = max_sen_len
        self._rel_size = rel_size
        self._batch_size = batch_size

    def build(self, input_shape):
        #    Handshaking layer: pairwise token combinations -> flattened vectors
        self._handshaking_layer = HandshakingKernelLayer(name=self.name + '_handshaking_kernel_layer', 
                                                         d_model=self._d_model, 
                                                         max_sen_len=self._max_sen_len)
        
        #    FC heads producing entity and relation predictions
        self._fc_layer = FCLayer(name=self.name + '_fc_layer', 
                                 max_sen_len=self._max_sen_len, 
                                 rel_size=self._rel_size, 
                                 batch_size=self._batch_size)
        
        super(TPLinkerLayer, self).build(input_shape)
    
    def call(self, inputs, **kwargs):
        #    Handshaking: turn the token matrix into flattened pair vectors
        x_vec = self._handshaking_layer(inputs)
        
        #    FC heads: compute and stash entity / relation predictions
        _ = self._fc_layer(x_vec)
        
        #    The return value is irrelevant; results are read from the fc layer
        return inputs


#    Handshaking layer
class HandshakingKernelLayer(tf.keras.layers.Layer):
    '''Handshaking kernel.

    Every pair of token vectors (i, j) with j >= i "shakes hands":
        h_ij = tanh(W * [h_i ; h_j] + b)
    The upper triangle (diagonal included) is flattened into a sequence of
    length max_sen_len*(max_sen_len+1)/2 (after dropping [CLS]/[EOS]).
    '''
    def __init__(self, 
                 name='handshaking_kernel_layer',
                 d_model=conf.BERT.get_d_model(),
                 max_sen_len=conf.TPLINKER.get_max_sentence_len(),
                 **kwargs):
        super(HandshakingKernelLayer, self).__init__(name=name, **kwargs)
        
        self._d_model = d_model
        max_sen_len = max_sen_len - 2                   #    drop the leading [CLS] and trailing [EOS]
        #    Length of the flattened upper triangle: the max_sen_len-th triangular
        #    number (closed form replaces the original accumulation loop).
        self._vec_len = max_sen_len * (max_sen_len + 1) // 2
    
    def build(self, input_shape):
        self._fc = tf.keras.layers.Dense(name=self.name + '_fc',
                                         units=self._d_model, 
                                         kernel_initializer=tf.initializers.he_normal(), 
                                         bias_initializer=tf.initializers.he_normal())
        
        super(HandshakingKernelLayer, self).build(input_shape)
    
    def call(self, inputs, **kwargs):
        '''inputs: Tensor(batch_size, max_sen_len, d_model)
            Returns Tensor(batch_size, ∑max_sen_len, d_model): the flattened
            upper triangle of the pairwise handshake matrix.
        '''
        X = inputs[:, 1:-1, :]                          #    strip the [CLS]/[EOS] rows.
                                                        #    NOTE(review): [CLS] is position 0, but [EOS] may not be the last
                                                        #    row; downstream works on indices, so this is tolerated.
        (_, max_sen_len, _) = X.shape
        
        #    Build the pairwise grid X*X
        X_i = tf.tile(X[:, :, tf.newaxis, :], multiples=[1, 1, max_sen_len, 1])
        X_j = tf.tile(X[:, tf.newaxis, :, :], multiples=[1, max_sen_len, 1, 1])
        X_mat = tf.concat([X_i, X_j], axis=-1)          #    Tensor(batch_size, max_sen_len-2, max_sen_len-2, 2*d_model)
        
        #    Compute: W*[hi;hj] + b
        X_mat = self._fc(X_mat)                         #    Tensor(batch_size, max_sen_len-2, max_sen_len-2, d_model)
        
        #    Compute: tanh(W*[hi;hj] + b)
        X_mat = tf.nn.tanh(X_mat)                       #    Tensor(batch_size, max_sen_len-2, max_sen_len-2, d_model)
        
        #    Keep the upper triangle (diagonal included).
        #    batch_size is unknown here, so build a ones template that carries it.
        mat_tmp = tf.ones_like(X_mat)
        mat_tmp = mat_tmp[:, :, :, 0]                   #    only (batch_size, max_sen_len, max_sen_len) is needed
        #    (renamed from the misleading `idx_tril`: band_part with num_lower=0
        #    keeps the UPPER triangle)
        idx_triu = tf.linalg.band_part(input=mat_tmp, num_lower=0, num_upper=max_sen_len)
        #    Flatten the upper triangle into Tensor(batch_size, ∑max_sen_len, d_model)
        idx_triu = tf.cast(idx_triu, dtype=tf.int64)
        idx_triu = tf.where(idx_triu == 1)
        X_vec = tf.gather_nd(X_mat, indices=idx_triu)
        X_vec = tf.reshape(X_vec, shape=(-1, self._vec_len, self._d_model))
        
        return X_vec


#    FC layer
class FCLayer(tf.keras.layers.Layer):
    '''FC prediction heads over the flattened handshaking sequence.

    Produces:
      - entity head/tail pointers:  Tensor(batch_size, ∑max_sen_len, 2)
      - relation sh/st pointers:    Tensor(batch_size, rel_size, ∑max_sen_len, 2, 3)
    Both are stashed and read back via get_et_to_et()/get_sht_to_sht();
    call() returns its inputs unchanged.
    '''
    def __init__(self,
                 name='fc_layer',
                 max_sen_len=conf.TPLINKER.get_max_sentence_len(),
                 rel_size=len(rel.id_rel),
                 batch_size=conf.DATASET_BAIDU.get_batch_size(),
                 **kwargs):
        super(FCLayer, self).__init__(name=name, **kwargs)
        
        self._rel_size = rel_size
        self._batch_size = batch_size
        
        #    ∑max_sen_len: flattened upper-triangle length after dropping [CLS]/[EOS].
        #    Closed-form triangular number replaces the original accumulation loop.
        max_sen_len = max_sen_len - 2
        self._vec_len = max_sen_len * (max_sen_len + 1) // 2
    
    def build(self, input_shape):
        #    entity-to-entity head
        self._fc_et_to_et = tf.keras.layers.Dense(name=self.name + '_fc_et_to_et',
                                                  units=2, 
                                                  kernel_initializer=tf.initializers.he_normal(), 
                                                  bias_initializer=tf.initializers.he_normal())
        #    stash for the latest et2et prediction
        self._et_to_et = None
        
        #    subject/object head-to-head and tail-to-tail head
        self._fc_sht_to_sht = tf.keras.layers.Dense(name=self.name + '_fc_sht_to_sht',
                                                    units=self._rel_size * 2 * 3, 
                                                    kernel_initializer=tf.initializers.he_normal(), 
                                                    bias_initializer=tf.initializers.he_normal())
        #    stash for the latest sht2sht prediction
        self._sht_to_sht = None
        
        super(FCLayer, self).build(input_shape)
    
    def call(self, inputs, **kwargs):
        #    inputs: Tensor(batch_size, ∑max_sen_len, d_model)
        
        #    entity handshake pointers Tensor(batch_size, ∑max_sen_len, 2)
        #    (local renamed from the typo'd `et_to_to`)
        et_to_et = self._fc_et_to_et(inputs)
        et_to_et = tf.nn.softmax(et_to_et, axis=-1)
        self._et_to_et = et_to_et
        
        #    relation handshake pointers Tensor(batch_size, ∑max_sen_len, rel_size, 2, 3)
        #        rel_size: one slot per relation
        #        2: 0 = sh_to_sh, 1 = st_to_st
        #        3: the three possible tag values [0 | 1 | 2]
        sht_to_sht = self._fc_sht_to_sht(inputs)
        sht_to_sht = tf.reshape(sht_to_sht, shape=[-1, self._vec_len, self._rel_size, 2, 3])
        #    transpose to Tensor(batch_size, rel_size, ∑max_sen_len, 2, 3)
        sht_to_sht = tf.transpose(sht_to_sht, perm=[0, 2, 1, 3, 4])
        sht_to_sht = tf.nn.softmax(sht_to_sht, axis=-1)
        self._sht_to_sht = sht_to_sht
        
        return inputs
    
    def get_et_to_et(self):
        #    Latest entity head/tail prediction from call()
        return self._et_to_et
    def get_sht_to_sht(self):
        #    Latest relation sh/st prediction from call()
        return self._sht_to_sht