import numpy as np
import paddle
import paddle.nn.functional as F

# Set the random seed of the global default generator so the
# (commented-out) paddle demos below are reproducible.
paddle.seed(0)

# output = paddle.randn([2, 3])
# print('output:',output)
# print('softmax',F.softmax(output, axis=1)) # apply softmax to each row (similar to normalization)
# # Comparing the following two output methods shows the results are identical
# print('F.log_softmax:',F.log_softmax(output, axis=1))
# print('paddle.log:',paddle.log(F.softmax(output, axis=1)))
#
#
# # x sample
# X_sample = paddle.to_tensor([[-1.2, -2, -3]], "float32")
# print(X_sample)
# # y label
# Y_label = paddle.to_tensor([0], "int64")
# print(Y_label)
# # nll_loss
# print('nll_loss:',F.nll_loss(X_sample, Y_label))
#
# # Use log_softmax and nll_loss together
# output = paddle.to_tensor([[1.2, 2, 3]], "float32")
# target = paddle.to_tensor([0], "int64")
# print("output",output)
# print('target:',target)
# log_sm_output = F.log_softmax(output, axis=1)
# print('Output is [1.2, 2, 3]. If the target is 0, loss is:', F.nll_loss(log_sm_output, target))
# #
# target = paddle.to_tensor([1])
# log_sm_output = F.log_softmax(output, axis=1)
# print('Output is [1.2, 2, 3]. If the target is 1, loss is:', F.nll_loss(log_sm_output, target))
# #
# target = paddle.to_tensor([2])
# log_sm_output = F.log_softmax(output, axis=1)

# Label-smoothing / temperature-softmax demo.
# NOTE(review): numpy is already imported as `np` at the top of the file;
# re-importing it as `pd` is misleading (`pd` conventionally means pandas)
# and the alias is never used below — consider removing this line.
import numpy as pd
def softmax(x):
    """Return the softmax of ``x`` as a probability distribution.

    The maximum entry is subtracted before exponentiating; softmax is
    invariant to adding a constant to every entry, and the shift prevents
    ``np.exp`` from overflowing to ``inf`` (and producing NaNs) for large
    inputs — the original version broke for e.g. ``x = [1000, 1000]``.

    Args:
        x: array-like of floats, treated as one distribution.

    Returns:
        ``np.ndarray`` of the same shape as ``x`` whose entries sum to 1.
    """
    x = np.asarray(x, dtype=float)
    x_exp = np.exp(x - np.max(x))  # max-shift: numerically stable, same result
    return x_exp / np.sum(x_exp)
# Demo logits; NOTE: `output` is reused by the temperature-softmax demo below.
output=np.array([0.1,2,3.4,5])
# Plain softmax of the demo logits.
print('1',softmax(output))

def softmax_t(x,t):
    """Temperature-scaled softmax, i.e. ``softmax(x / t)``.

    A larger temperature ``t`` flattens the distribution (as used in label
    smoothing / knowledge distillation); ``t == 1`` reduces to the plain
    softmax. The max-shift keeps ``np.exp`` from overflowing for large
    inputs, matching the fix in ``softmax``.

    Args:
        x: array-like of floats, treated as one distribution.
        t: temperature, a positive scalar.

    Returns:
        ``np.ndarray`` of the same shape as ``x`` whose entries sum to 1.
    """
    z = np.asarray(x, dtype=float) / t
    z_exp = np.exp(z - np.max(z))  # numerically stable, result unchanged
    return z_exp / np.sum(z_exp)
# Temperature t=3 yields a flatter distribution than the plain softmax above.
print('2',softmax_t(output,3))