# -*- coding: utf-8 -*-  
'''
Runtime helper functions.

@author: luoyi
Created on 2021-06-28
'''
import tensorflow as tf


#    解析标记数据
def parse_y(y):
    #    et握手数据    Tensor(batch_size, max_sen_len, )
    Y_et_to_et = y[:, 0, :, 0]
    if (Y_et_to_et.shape[1] == 1): Y_et_to_et = tf.squeeze(Y_et_to_et, axis=1)
    Y_et_no = y[:, 1, :, 0]
    if (Y_et_no.shape[1] == 1): Y_et_no = tf.squeeze(Y_et_no, axis=1)
    
    #    sh=1握手数据    Tensor(batch_size, max_sen_len, 2)
    Y_sh_to_sh_1 = y[:, 2, :, :]
    if (Y_sh_to_sh_1.shape[1] == 1): Y_sh_to_sh_1 = tf.squeeze(Y_sh_to_sh_1, axis=1)
    #    sh=2握手数据    Tensor(batch_size, max_sen_len, 2)
    Y_sh_to_sh_2 = y[:, 3, :, :]
    if (Y_sh_to_sh_2.shape[1] == 1): Y_sh_to_sh_2 = tf.squeeze(Y_sh_to_sh_2, axis=1)
    #    sh=0不握手数据    Tensor(batch_size, max_sen_len, 2)
    Y_sh_to_sh_no = y[:, 4, :, :]
    if (Y_sh_to_sh_no.shape[1] == 1): Y_sh_to_sh_no = tf.squeeze(Y_sh_to_sh_no, axis=1)
    
    
    #    st=1握手数据    Tensor(batch_size, max_sen_len, 2)
    Y_st_to_st_1 = y[:, 5, :, :]
    if (Y_st_to_st_1.shape[1] == 1): Y_st_to_st_1 = tf.squeeze(Y_st_to_st_1, axis=1)
    #    st=2握手数据    Tensor(batch_size, max_sen_len, 2)
    Y_st_to_st_2 = y[:, 6, :, :]
    if (Y_st_to_st_2.shape[1] == 1): Y_st_to_st_2 = tf.squeeze(Y_st_to_st_2, axis=1)
    #    st=0不握手数据    Tensor(batch_size, max_sen_len, 2)
    Y_st_to_st_no = y[:, 7, :, :]
    if (Y_st_to_st_no.shape[1] == 1): Y_st_to_st_no = tf.squeeze(Y_st_to_st_2, axis=1)
    
    return Y_et_to_et, Y_et_no, \
            Y_sh_to_sh_1, Y_sh_to_sh_2, Y_sh_to_sh_no, \
            Y_st_to_st_1, Y_st_to_st_2, Y_st_to_st_no
                

#    标注索引转换成相对本批次索引
#    Convert labeled indices into indices relative to the current batch
def relative_idx_et(y):
    '''
        @param y: RaggedTensor(batch_size, None)
        @return num: Tensor(batch_size, ) number of indices per batch element
                indices: Tensor(num_idx, 2)
                            0: coordinate along the batch_size axis
                            1: per-batch coordinate into the vec vector
    '''
    #    valid entries are the non-negative ones
    valid = y >= 0
    #    how many valid indices each batch element carries    Tensor(batch_size, )
    num = tf.math.count_nonzero(valid, axis=-1)

    #    positions of the valid entries within this batch    Tensor(sum_idx, 2)
    positions = tf.where(valid)
    batch_dim = positions[:, 0]
    vec_dim = tf.gather_nd(y, indices=positions)
    #    pair each batch coordinate with its vec coordinate    Tensor(sum_idx, 2)
    return num, tf.stack([batch_dim, vec_dim], axis=-1)


#    标注索引转换成相对本批次索引
#    Convert labeled indices into indices relative to the current batch
def relative_idx_sht(y):
    '''
        @param y: RaggedTensor(batch_size, max_sen_len, 2)
        @return num: Tensor(batch_size, ) number of indices per batch element
                indices: Tensor(num_idx, 3)
                            0: coordinate along the batch_size axis
                            1: per-batch coordinate along the rel dimension
                            2: per-batch coordinate into the vec vector
    '''
    #    an entry is valid when its first component is non-negative
    valid = y[:, :, 0] >= 0
    #    how many valid indices each batch element carries    Tensor(batch_size, )
    num = tf.math.count_nonzero(valid, axis=-1)

    #    positions of the valid entries within this batch    Tensor(sum_idx, 2)
    locs = tf.where(valid)
    #    keep the batch coordinate as a column    Tensor(sum_idx, 1)
    batch_col = locs[:, :1]
    #    the stored (rel, vec) coordinate pairs    Tensor(sum_idx, 2)
    pairs = tf.gather_nd(y, indices=locs)
    #    prepend the batch coordinate    Tensor(sum_idx, 3)
    return num, tf.concat([batch_col, pairs], axis=-1)