import torch

# def gumbel_sampling(probs,pi,xi,topk=30):
#     vocab_size = probs.shape[1]
#     top_probs, top_indices = torch.topk(probs,topk,dim=-1) # [batch_size,topk]

#     assert (pi == torch.arange(vocab_size)).all(), "EXP only supports identity permutation"

#     # xi [batch_size,vocab_size]
#     # xi.gather(-1,top_indices)[i][j] -> xi[i][top_indices[j]]
#     # top_sampled_indices.unsqueeze(-1) [batch_size, 1]
#     # top_indices.gather(-1,top_sampled_indices.unsqueeze(-1))[i][j] -> top_indices[i][top_sampled_indices[j]]
#     top_sampled_indices = torch.argmax(xi.gather(-1, top_indices) ** (1/top_probs),dim=-1)
#     return top_indices.gather(-1,top_sampled_indices.unsqueeze(-1))

def gumbel_sampling(probs, pi, xi):
    """Sample one token per batch row using the exponential/Gumbel-max trick.

    For each row this returns ``argmax_j xi[j] ** (1 / probs[j])``, which
    draws an index distributed according to ``probs`` when ``xi`` is i.i.d.
    Uniform(0, 1) — the EXP watermark sampling rule.

    Args:
        probs: ``[batch_size, vocab_size]`` next-token probabilities.
        pi: ``[vocab_size]`` permutation tensor; must be the identity
            permutation (asserted — EXP does not support reordering here).
        xi: ``[batch_size, vocab_size]`` uniform random keys in (0, 1).

    Returns:
        ``[batch_size, 1]`` tensor of sampled token indices.

    Raises:
        AssertionError: if ``pi`` is not the identity permutation.
    """
    vocab_size = probs.shape[1]
    # Build the reference arange on pi's device so the comparison does not
    # fail with a CPU/GPU device mismatch when pi lives on an accelerator.
    assert (pi == torch.arange(vocab_size, device=pi.device)).all(), \
        "EXP only supports identity permutation"
    # argmax(xi ** (1/p)) == argmax(log(xi) / p) because log is strictly
    # monotone; the log-space form avoids xi ** (1/p) underflowing to 0.0
    # for small probabilities, which would create spurious all-zero ties.
    return torch.argmax(torch.log(xi) / probs, dim=1).unsqueeze(-1)
