# -*- coding: utf-8 -*-  
'''
@author: luoyi
Created on 2021年7月4日
'''
import tensorflow as tf


#    Parse the packed y tensor into its component label tensors
def parse_y(y):
    '''Split the stacked label tensor into the 7 per-task label tensors.
        @param y: packed labels    Tensor(batch_size, 7, max_sen_len, 2)
                    channel 0:   entity position ids (only component 0 is used)
                    channel 1-3: sh handshake labels (sh=1 / sh=2 / sh=0 no-handshake)
                    channel 4-6: st handshake labels (st=1 / st=2 / st=0 no-handshake)
        @return Y_id_pos:                                  Tensor(batch_size, max_sen_len)
                Y_sh_to_sh_1, Y_sh_to_sh_2, Y_sh_to_sh_no: Tensor(batch_size, max_sen_len-2, 2)
                Y_st_to_st_1, Y_st_to_st_2, Y_st_to_st_no: Tensor(batch_size, max_sen_len-2, 2)
        Note: fixes a copy-paste bug in the original where the st=0 tensor was
        produced by squeezing Y_st_to_st_2 instead of Y_st_to_st_no.
    '''
    def _handshake_channel(i):
        #    Slice handshake channel i, dropping the last 2 positions of the
        #    sentence axis (presumably the [EOS]/[PAD] tail -- TODO confirm).
        t = y[:, i, :-2, :]
        #    Collapse a singleton sentence axis, mirroring the original per-tensor checks
        if (t.shape[1] == 1): t = tf.squeeze(t, axis=1)
        return t
    
    #    Entity position data    Tensor(batch_size, max_sen_len)
    Y_id_pos = y[:, 0, :, 0]
    if (Y_id_pos.shape[1] == 1): Y_id_pos = tf.squeeze(Y_id_pos, axis=1)
    
    return Y_id_pos, \
            _handshake_channel(1), _handshake_channel(2), _handshake_channel(3), \
            _handshake_channel(4), _handshake_channel(5), _handshake_channel(6)


#    Convert annotation indices into indices relative to the current batch
def relative_idx_sht(y):
    '''
        @param y: RaggedTensor(batch_size, max_sen_len, 2)
        @return num: Tensor(batch_size, ) number of valid indices per batch item
                indices: Tensor(num_idx, 3)
                            0: batch-dimension coordinate
                            1: rel-dimension coordinate relative to the batch item
                            2: vec coordinate relative to the batch item
    '''
    #    Valid entries are those whose first component is non-negative
    valid = y[:, :, 0] >= 0
    #    How many valid indices each batch item contributes    Tensor(batch_size, )
    num = tf.math.count_nonzero(valid, axis=-1)

    #    (batch, position) coordinates of every valid entry    Tensor(sum_idx, 2)
    positions = tf.where(valid)
    #    The stored 2-component index at each valid position   Tensor(sum_idx, 2)
    stored = tf.gather_nd(y, indices=positions)
    #    Prepend the batch coordinate to each stored pair      Tensor(sum_idx, 3)
    indices = tf.concat([positions[:, 0:1], stored], axis=-1)

    return num, indices


#    Padding mask
def padding_mask(y_true):
    '''Mask that is 0 at [PAD] positions and 1 everywhere else.
        @param y_true: true tags, [PAD] encoded as -1    Tensor(batch_size, max_sen_len)
        @return: Tensor(batch_size, max_sen_len), same dtype as y_true,
                    0 at [PAD], 1 elsewhere
    '''
    #    Casting the boolean condition yields 1 for real tokens, 0 for [PAD]
    return tf.cast(y_true >= 0, dtype=y_true.dtype)
#    Padding mask derived from the X data
def padding_mask_from_x(x):
    '''Mask that is 0 at [PAD] positions and 1 everywhere else.
        @param x: padded x data ([PAD] id is 0)    Tensor(batch_size, max_sen_len)
        @return: Tensor(batch_size, max_sen_len), same dtype as x,
                    0 at [PAD], 1 elsewhere
    '''
    #    Casting the boolean condition yields 1 where x holds a real token id
    return tf.cast(x > 0, dtype=x.dtype)


#    Log-score of the true (gold) tag path — the CRF numerator
def score(y_pred, y_true, transfer):
    '''Log-score of the gold tag sequence for each batch item.
        score(i) = exp(∑(t=1->T-1) Bi[t] + A[Bi[t-1], Bi[t]])               ------ steps 0 and T are start/end and are skipped
                 = exp(∑(t=1->T-1) Bi[t] + ∑(t=1->T-1) A[Bi[t-1], Bi[t]]))
        log(score(i)) = ∑(t=1->T-1) Bi[t] + ∑(t=1->T-1) A[Bi[t-1], Bi[t]])
        Bi[t] = y_pred[t, y_true[t]]                                        ------ emission score of the gold tag at step t
        A[Bi[t-1], Bi[t]] = transfer[y_true[t-1], y_true[t]]                ------ transition score from the gold tag at t-1 to the gold tag at t
        Note: the sum effectively runs over t=1->T-1. Step 0 is [GO] and step T
              is [EOS]; they stand for start/end and do not enter the sequence score.
        
        @param y_pred: predicted score of every tag at every step    Tensor(batch_size, max_sen_len, tag_size) Bi[0]=start=0, Bi[-1]=end=0
        @param y_true: gold tag ids per step, [PAD] is -1             Tensor(batch_size, max_sen_len)
        @param transfer: transition matrix                            Tensor(tag_size, tag_size)    tag_size includes [CLS][EOS]
        @return log-score per batch item. Tensor(batch_size)
    '''
    #    Real (un-padded) length of every batch item    Tensor(batch_size, )
    real_sen_len = tf.math.count_nonzero(y_true >= 0, axis=-1)
    
    #    Emission score of the gold tag at every real (batch, step) position
    idx_t = tf.where(y_true >= 0)                                                        #    Tensor(sum_real_len, 2)
    idx_t = tf.concat([idx_t, tf.expand_dims(y_true[y_true>=0], axis=-1)], axis=-1)      #    Tensor(sum_real_len, 3)    [idx_b, idx_s, idx_tag]
    y_pred = tf.gather_nd(y_pred, indices=idx_t)                                         #    Tensor(sum_real_len, )
    #    Per-batch gold-path emission scores; each row still includes [GO] and [EOS]    RaggedTensor(batch_size, real_sen_len)
    b = tf.RaggedTensor.from_row_lengths(y_pred, real_sen_len)
    sum_b = tf.math.reduce_sum(b[:, 1:-1], axis=-1)                                      #    drop [GO]/[EOS] so the length matches the transition sum; their emission scores are 0
    
    #    Transition scores per batch item; effectively covers start->t1->t2->...->T-1
    a_idx = tf.stack([y_true[:, 0:-1], y_true[:, 1:]], axis=-1)
    a_idx = a_idx[a_idx[:, :, 1] >= 0]                                          #    drop pairs whose target is [PAD]
    a_idx = tf.RaggedTensor.from_row_lengths(a_idx, real_sen_len-1)             #    -1 removes the trailing pair introduced by [EOS]
    a = tf.gather_nd(transfer, a_idx[:, :-1])                                   #    another -1 removes the final T -> [EOS] transition
    sum_a = tf.math.reduce_sum(a, axis=-1)                                      #    transition score of [GO]->t1->t2->...->T (the T->[EOS] step was removed above)

    sum_i = sum_b + sum_a
#     sum_i = tf.math.exp(sum_i)                           #    log(exp(score)) = score, so the result stays in log space
    return sum_i


#    Full-path score (log partition function Z) — the CRF denominator
def score_full_path(y_pred, y_true, transfer):
    '''Compute log Z = log ∑(all paths) exp(path score) via the forward algorithm.
        Recursion (log space; M = real tags, i = previous tag, j = current tag):
        ------ t=1 ------
        prev[1,j] = B[1,j] + A[start,j]
        ------ t>=2 ------
        obs[t,i,j] = B[t,j] + A[i,j]
        scores[t,i,j] = prev[t-1,i] + obs[t,i,j]
        prev[t,j] = log(∑(i∈M)exp(scores[t,i,j]))        ------ marginalize over the PREVIOUS tag i
        ------ end ------
        Z = log(∑(j∈M)exp(prev[T,j]))
        
        Note: fixes a bug in the original, which reduced over axis=-1 (the current
        tag j) instead of axis=1 (the previous tag i); the recursion therefore
        carried the wrong state between time steps.
        
        @param y_pred: emission score matrix              Tensor(batch_size, max_sen_len, tag_size)
        @param y_true: gold tag sequence, [PAD] is -1     Tensor(batch_size, max_sen_len)
        @param transfer: transition score matrix          Tensor(tag_size, tag_size)
        @return Z: log partition per batch item           Tensor(batch_size, )
    '''
    #    Maximum (padded) sentence length
    max_sen_len = y_true.shape[-1]
    #    Padding mask: [PAD] -> 0, real tokens -> 1    Tensor(batch_size, max_sen_len)
    pad_mask = padding_mask(y_true)
    #    Mask values are 0/1, so `>= 0` is always true — this simply recovers the batch size
    batch_size = tf.math.count_nonzero(pad_mask[:,0] >= 0)
    #    [EOS] would otherwise enter the recursion, so mask it out as well:
    #    drop the last valid position of every row, then re-pad back to max_sen_len
    pad_mask = tf.RaggedTensor.from_tensor(pad_mask, padding=0)
    pad_mask = pad_mask[:, :-1]
    pad_mask = tf.RaggedTensor.to_tensor(pad_mask, default_value=0, shape=(batch_size, max_sen_len))
    pad_mask = tf.cast(pad_mask, tf.float32)
    
    #    t=1: transitions out of [GO] (row 0 of transfer) into the real tags
    B = y_pred[:, 1, 1:-1]                  #    Tensor(batch_size, tag_size-2)
    A = transfer[0, 1:-1]                   #    Tensor(tag_size-2, )
    prev = B + A[tf.newaxis, :]             #    Tensor(batch_size, tag_size-2)
    #    t = 2 .. T
    for t in range(2, max_sen_len):
        B = y_pred[:, t, 1:-1][:, tf.newaxis, :]                #    Tensor(batch_size, 1, tag_size-2)
        A = transfer[1:-1, 1:-1][tf.newaxis, :, :]              #    Tensor(1, tag_size-2, tag_size-2)
        #    obs[t,i,j] = B[t,j] + A[i,j]
        obs = B + A                                             #    Tensor(batch_size, tag_size-2, tag_size-2)
        #    scores[t,i,j] = prev[t-1,i] + obs[t,i,j]
        scores = prev[:, :, tf.newaxis] + obs                   #    Tensor(batch_size, tag_size-2, tag_size-2)
        #    prev[t,j] = log(∑(i∈M)exp(scores[t,i,j])), max-shifted for numerical stability.
        #    BUGFIX: reduce over axis=1 (previous tag i), not axis=-1 (current tag j).
        max_val = tf.math.reduce_max(scores, axis=(1, 2))
        prev_t = max_val[:, tf.newaxis] + tf.math.log(tf.math.reduce_sum(tf.math.exp(scores - max_val[:, tf.newaxis, tf.newaxis]), axis=1))    #    Tensor(batch_size, tag_size-2)
        
        #    Where this position is masked ([PAD]/[EOS]) keep the previous prev (roll back the step)
        mask = tf.tile(pad_mask[:, t][:, tf.newaxis], multiples=(1, prev_t.shape[-1]))
        prev = tf.where(mask == 0, prev, prev_t)
    
    #    Z = log(∑(j∈M)exp(prev[T,j])), again max-shifted
    max_val = tf.math.reduce_max(prev, axis=-1)
    Z_exp = tf.math.log(tf.math.reduce_sum(tf.math.exp(prev - max_val[:, tf.newaxis]), axis=-1))
    Z = max_val + Z_exp                                         #    Tensor(batch_size, )
    return Z



#    Most probable path (Viterbi decode)
def viterbi(y_pred, pad_mask, transfer):
    '''Find the highest-scoring tag path.
        Viterbi algorithm:
            step1: score all paths at t=1: start -> {m1, m2, ... , M}, kept as a vector
                    score(m) = B[1,m]
                    crt_score = B[1] * A[start, B[1]]
                    crt_path = B[1]
            step2: best path into every node at t=2: start -> t1(M nodes) -> t2(M nodes)
                    score(m) = max(B[1,m1] + B[2,m]*A[m1,m], B[1,m2] + B[2,m]*A[m2,m], ..., B[1,M] + B[2,m]*A[M,m] )
                    crt_score = {score(m1), score(m2), ..., score(M)}
                    crt_path = concat(crt_path, the m of score(m))
            ...
            stept: best path into every node at step t: start -> t1 -> t2 -> ... -> t (M nodes each)
                    score(m) = max(B[t-1,m1] + B[t,m]*A[m1,m], B[t-1,m2] + B[t,m]*A[m2,m], ..., B[t-1,M] + B[t,m]*A[M,m] )
                    crt_score = {score(m1), score(m2), ..., score(M)}
                    crt_path = concat(crt_path, the m of score(m))
            ...
            stepT: best path into every node at the final step T
                    score(m) = max(B[T-1,m1] + B[T,m]*A[m1,m], B[T-1,m2] + B[T,m]*A[m2,m], ..., B[T-1,M] + B[T,m]*A[M,m] )
                    crt_score = {score(m1), score(m2), ..., score(M)}
                    crt_path = concat(crt_path, the m of score(m))
            max(crt_score) is the best-path score
            the crt_path matching max(crt_score) is the best path (the last m of crt_score and crt_path correspond)
        NOTE(review): scores here combine multiplicatively (B * A) while score() and
        score_full_path() are additive in log space — confirm that `transfer` holds
        probabilities (not log scores) for this function.
        NOTE(review): a textbook Viterbi keeps one backpointer per state; here a single
        argmax per time step is shared across all tag rows (see the repeat below) —
        verify decode quality against a reference implementation.
        @param y_pred: emission matrix                         Tensor(batch_size, max_sen_len, tag_size)
        @param pad_mask: mask, [PAD] is 0, others 1            Tensor(batch_size, max_sen_len)
        @param transfer: transition matrix                     Tensor(tag_size, tag_size)
        @return: max_path: best path (no [GO]/[EOS])           RaggedTensor(batch_size, real_sen_len)
                 max_score: best final-step score              Tensor(batch_size, )
                 his_score: final per-tag path scores          Tensor(batch_size, tag_size-2)
                 his_path: recorded path per tag row           Tensor(batch_size, tag_size-2, max_sen_len-1)
    '''
    (batch_size, max_sen_len, tag_size) = y_pred.shape
    
    #    Step t=1    Tensor(batch_size, tag_size-2)
    B1 = y_pred[:, 1, 1:-1]                                                             #    Tensor(batch_size, tag_size-2)
    A1 = tf.cast(transfer[tf.newaxis, 0, 1:-1], dtype=tf.float32)                       #    Tensor(1, tag_size-2)
    his_score = B1 * A1                                                                 #    running best-path score per tag    Tensor(batch_size, tag_size-2)
    #    Initial path: every tag row starts with its own tag id (1..tag_size-2)
    his_path = tf.range(1, tag_size-1, dtype=tf.int64)[tf.newaxis, :, tf.newaxis]
    his_path = tf.repeat(his_path, repeats=batch_size, axis=0)
    
    #    Steps 2 .. max_sen_len-1
    for t in range(2, max_sen_len):
        #    Score matrix for step t: score[i,j] = his_score[j] + B[i] * A[j, i]
        #    Broadcast previous best scores to (batch_size, tag_size-2, tag_size-2); each row is the t-1 best-path scores
        prev_score = his_score[:, tf.newaxis, :]
        #    Emission scores of the current step,
        #    broadcast to (batch_size, tag_size-2, tag_size-2); each row repeats one node's score
        B = y_pred[:, t, 1:-1][:, :, tf.newaxis]
        #    Transition matrix A; A[i,j] = probability of moving from i to j
        A = transfer[1:-1, 1:-1][tf.newaxis, :, :]
        #    score[i,j] = emission of node i at step t * transition j->i + best path score of node j at t-1
        score = prev_score + B * A
        #    Best score per node, and the step's best tag
        max_score = tf.math.reduce_max(score, axis=-1)                  #    Tensor(batch_size, tag_size-2)
        max_path = tf.math.argmax(max_score, axis=-1) + 1               #    +1 skips index 0 of y_pred, which is the [GO] probability; Tensor(batch_size, ) — NOTE(review): original comment claimed (batch_size, tag_size-2)
        
        #    Mask for step t    Tensor(batch_size, ), [PAD] is 0, others 1
        mask = pad_mask[:, t]
        #    [PAD] positions are filled with -1
        max_path = tf.where(mask > 0, max_path, -tf.ones_like(max_path))
        #    At [PAD] positions his_score stays unchanged; elsewhere it is updated
        his_score = tf.where(tf.repeat(mask[:, tf.newaxis], repeats=max_score.shape[-1], axis=-1) > 0, max_score, his_score)            #    Tensor(batch_size, tag_size-2)
        
        #    Append this step's choice to every tag row (shared across rows — see NOTE above)
        max_path = tf.repeat(max_path[:, tf.newaxis, tf.newaxis], repeats=tag_size-2, axis=1)
        his_path = tf.concat([his_path, max_path], axis=-1)             #    Tensor(batch_size, tag_size-2, max_sen_len)
        
        pass

    #    Pick the best final row per batch item
    idx = tf.math.argmax(his_score, axis=-1)
    idx = tf.stack([tf.range(batch_size, dtype=tf.int64)[:, tf.newaxis], idx[:, tf.newaxis]], axis=-1)
    max_path = tf.gather_nd(his_path, idx)
    max_path = tf.squeeze(max_path, axis=1)
    #    Keep from step 1 up to just before the first [PAD] (prediction excludes [GO] and [EOS])
    max_path = tf.RaggedTensor.from_tensor(max_path, padding=-1)
    max_path = max_path[:, :-1]
    max_score = tf.math.reduce_max(his_score, axis=-1)
    
    return max_path, max_score, his_score, his_path