import torch
import torch.nn.functional as F


def top_K_P_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Filter a 1-D tensor of logits with top-k and/or nucleus (top-p) filtering.

    Args:
        logits: 1-D tensor of unnormalized log-probabilities, shape (vocab_size,).
        top_k: if > 0, keep only the ``top_k`` tokens with the highest logits.
        top_p: if > 0.0, keep the smallest set of tokens whose cumulative
            softmax probability exceeds ``top_p`` (nucleus filtering).
        filter_value: value assigned to removed positions (default ``-inf``).

    Returns:
        A new tensor of the same shape; removed positions hold ``filter_value``.
        The input tensor is not modified.
    """
    assert logits.dim() == 1
    filtered_logits = logits.clone()

    # --- top-k -------------------------------------------------------------
    if top_k > 0:
        # Safety: top_k cannot exceed the vocabulary size.
        top_k = min(top_k, logits.size(-1))
        # k-th largest logit; everything strictly below it is removed.
        kth_value = torch.topk(filtered_logits, top_k)[0][..., -1, None]
        filtered_logits[filtered_logits < kth_value] = filter_value

    # --- top-p (nucleus) ---------------------------------------------------
    if top_p > 0.0:
        # Sort the (possibly already top-k-filtered) logits so the two
        # filters compose consistently: tokens removed by top-k carry zero
        # probability mass here.
        sorted_logits, sorted_indices = torch.sort(
            filtered_logits, dim=-1, descending=True)
        # Cumulative probability mass in descending-probability order.
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

        # Tokens whose cumulative probability exceeds the threshold.
        sorted_remove = cumulative_probs > top_p
        # Shift the mask right so the first token that crosses the threshold
        # is KEPT. Without the shift, a single token with probability > top_p
        # would be removed too, leaving an all--inf vector (NaN softmax).
        sorted_remove[..., 1:] = sorted_remove[..., :-1].clone()
        sorted_remove[..., 0] = False

        # Map the sorted-order mask back to original vocabulary indices.
        filtered_logits[sorted_indices[sorted_remove]] = filter_value
    return filtered_logits
