import numpy as np

np.random.seed(100)        # fix the RNG so the demo vector is reproducible
vec = np.random.rand(12) * 10  # 12 random "logits" in [0, 10)

def softmax_v1(vec):
    """Naive 2-pass softmax (no max-shift; np.exp can overflow for large logits)."""
    exps = np.exp(vec)
    return exps / exps.sum()

def softmax_v2(vec):
    """Numerically safe 3-pass softmax: shift by the max before exponentiating."""
    shifted = np.exp(vec - np.max(vec))
    return shifted / shifted.sum()


def softmax_v3(vec):
    """Blockwise (2-block) safe softmax, the merge trick behind online softmax.

    Each half is reduced with its own local max; the two partial sums are then
    rescaled to the global max and merged, giving the same result as softmax_v2.

    Parameters: vec -- 1-D array of logits with len(vec) >= 2.
    Returns:    1-D probability array summing to 1.
    """
    half = len(vec) // 2
    max1, max2 = np.max(vec[:half]), np.max(vec[half:])
    gmax = max(max1, max2)  # global max is the max of the block maxima
    # Partial denominators, each computed against its block's *local* max ...
    sum1 = np.sum(np.exp(vec[:half] - max1))
    sum2 = np.sum(np.exp(vec[half:] - max2))
    # ... then rescaled to the global max before merging.  (The previous code
    # exponentiated against the global max AND multiplied by exp(max_i - gmax),
    # applying the correction factor twice, so the output did not sum to 1.)
    lx = sum1 * np.exp(max1 - gmax) + sum2 * np.exp(max2 - gmax)
    return np.exp(vec - gmax) / lx

# print(softmax_v1(vec))
# print("===================")
# print(softmax_v2(vec))
# print("===================")
# print(softmax_v3(vec))


# The reviewed code takes logits as input and handles edge cases thoroughly
# (though I think it misses the case where both k and p are supplied at once,
# which should probably be rejected).
# Clever use of torch.cumsum.
# Avoids the awkward situation where no token at all can be selected.


