
import numpy as np
import tensorflow as tf
# import matplotlib.pyplot as plt
# from tensorflow.keras import Model
# from tensorflow.keras.layers import Dense

# model = tf.keras.models.Sequential()
# model.add(tf.keras.Input(shape=(16,)))
# model.add(tf.keras.layers.Dense(32, activation='relu'))
# # Now the model will take as input arrays of shape (None, 16)
# # and output arrays of shape (None, 32).
# # Note that after the first layer, you don't need to specify
# # the size of the input anymore:
# # model.add(tf.keras.layers.Dense(32))
# print(model.output_shape)
# model.summary()



# x = np.arange(10).reshape(1, 5, 2)
# print(x)





# y = np.arange(10, 20).reshape(1, 2, 5)
# print(y)


# # y=tf.keras.layers.Dot(axes=(1, 2))([x, y])
# y=tf.matmul(y, x)
# print(y)

# y_true = [[1, 2]]
# y_pred = [[
#     [0.05, 0.95, 0], 
#     [0.1, 0.8, 0.1]]]
# # Using 'auto'/'sum_over_batch_size' reduction type.
# scce = tf.keras.losses.SparseCategoricalCrossentropy()
# r=scce(y_true, y_pred).numpy()
# print(r)
# y_true=[[
#         [0,1,0],
#         [0,0,1,]
#         ]]

# scce = tf.keras.losses.CategoricalCrossentropy()
# r=scce(y_true, y_pred).numpy()
# print(r)

# # tf.keras.losses.categorical_crossentropy(y_true, y_pred)
# # z=y_true*tf.math.log(y_pred)
# # print(z)
# print((tf.math.log(0.95)+tf.math.log(0.1))/2)
# x=tf.constant([3.,2])
# One-hot encode each integer label in `x`.
# NOTE(review): the original call used depth=4, but `x` holds values up to 7.
# tf.one_hot silently maps any index >= depth to an all-zero vector, so the
# entries 5, 6 and 7 were encoded as empty rows. Derive depth from the data
# so every value present gets a proper one-hot slot.
x = tf.constant([
    [1, 2],
    [7, 5],
    [0, 5],
    [3, 4],
    [6, 7],
])

# depth = max value + 1 (here 8) covers every label appearing in x.
depth = int(tf.reduce_max(x)) + 1
# x has shape (5, 2); with axis=2 (the trailing axis here) the result is
# (5, 2, depth). on_value=1.0 makes the output dtype float32.
y = tf.one_hot(x, depth=depth, axis=2, on_value=1.0)
print(y)
# x=tf.constant([
#     [[0. ,0.2 ,0.8],[0.2, 0.8, 0.]],
#     [[0., 0.3 ,0.7],[0.6, 0.4 ,0.]]])
# z=tf.keras.losses.categorical_crossentropy(y, x)
# print(z)
