# -*- encoding: utf-8 -*-
"""
@File    : one_hot.py
@Author  : lilong
@Time    : 2023/2/27 2:45 下午
"""

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Layer, LSTM, Lambda, Embedding
from tensorflow.keras import backend as K

# Model input: a matrix of word IDs, shape [batch_size, seq_len]; ID 0 is padding.
x = np.array(
            [[5, 3, 0, 0],
             [2, 2, 8, 0]]
           )


# Padding mask: 1.0 for real tokens (ID > 0), 0.0 for padding.
# Resulting shape: [batch_size, seq_len, 1].
x_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(x)
print('x_mask:', x_mask)
# x_mask: tf.Tensor(
# [[[1.]
#   [1.]
#   [0.]
#   [0.]],
#
#  [[1.]
#   [1.]
#   [1.]
#   [0.]]], shape=(2, 4, 1), dtype=float32)


x = K.cast(x, 'int32')  # one_hot requires integer indices
# BUG FIX: the depth (num_classes) must be STRICTLY GREATER than the largest
# ID — tf.one_hot silently encodes indices outside [0, depth) as all-zero
# rows. The largest ID here is 8, so the original depth of 8 silently
# dropped token 8; depth must be at least 9.
x = K.one_hot(x, 9)
print('K.one_hot:', x)
# Index matrix -> one-hot (0/1) tensor: (btz, seq_len, num_classes)
# K.one_hot: tf.Tensor(
# [[[0. 0. 0. 0. 0. 1. 0. 0. 0.]
#   [0. 0. 0. 1. 0. 0. 0. 0. 0.]
#   [1. 0. 0. 0. 0. 0. 0. 0. 0.]
#   [1. 0. 0. 0. 0. 0. 0. 0. 0.]],
#
#  [[0. 0. 1. 0. 0. 0. 0. 0. 0.]
#   [0. 0. 1. 0. 0. 0. 0. 0. 0.]
#   [0. 0. 0. 0. 0. 0. 0. 0. 1.]
#   [1. 0. 0. 0. 0. 0. 0. 0. 0.]]], shape=(2, 4, 9), dtype=float32)

# (btz, seq_len, 1) * (btz, seq_len, num_classes) keeps only the encodings of
# real tokens; masked (padding) positions become all-zero rows.
# Summing over axis=1 (keepdims=True) collapses the sequence dimension into
# per-class occurrence counts, shape (btz, 1, num_classes).
x = K.sum(x_mask * x, axis=1, keepdims=True)
print('K.sum:', x)
# K.sum: tf.Tensor(
# [[[0. 0. 0. 1. 0. 1. 0. 0. 0.]]
#  [[0. 0. 2. 0. 0. 0. 0. 0. 1.]]], shape=(2, 1, 9), dtype=float32)


# Binarize the counts: 1.0 if the token occurs at least once, else 0.0.
x = K.cast(K.greater(x, 0.5), 'float32')
print(x)
# tf.Tensor(
# [[[0. 0. 0. 1. 0. 1. 0. 0. 0.]]
#  [[0. 0. 1. 0. 0. 0. 0. 0. 1.]]], shape=(2, 1, 9), dtype=float32)



