import torch


def tlog(x):
    """Numerically safe elementwise natural log.

    Clamps the input to a small positive floor (1e-12) before taking the
    log, so zero entries map to ``log(1e-12) ≈ -27.6`` (a large negative
    value) rather than ``-inf``.

    Bug fixed: the previous ``torch.where(x == 0, torch.tensor(1e-12),
    torch.log(x))`` returned the *raw* value ``1e-12`` (≈ ``log(1)``) for
    zeros, which silently treated a zero n-gram precision as a near-perfect
    one and inflated the BLEU score; it also allocated the constant on the
    CPU with the default dtype, failing for CUDA/float64 inputs.  Clamping
    keeps the result on the input's device and dtype.

    Args:
        x: Tensor of non-negative values (n-gram precisions).

    Returns:
        Tensor of the same shape: ``log(max(x, 1e-12))``.
    """
    return torch.log(torch.clamp(x, min=1e-12))


def n_gram_P(n_gram, candidate_sentence, reference_sentence):
    """Position-aligned n-gram precision per batch row.

    Slides a window of length ``n_gram`` over both sentences at the same
    positions and marks a window as a hit (1) only when every token in the
    candidate window equals the corresponding reference token; otherwise 0.
    Returns the fraction of hit windows for each batch row.

    NOTE(review): this is *positional* matching, not the multiset n-gram
    counting of standard BLEU — presumably intentional for this toy metric.

    Args:
        n_gram: window length n.
        candidate_sentence: (batch, cand_len) integer token tensor.
        reference_sentence: (batch, ref_len) integer token tensor.

    Returns:
        Float tensor of shape (batch,): hits / number of windows.
    """
    ref_len = reference_sentence.shape[1]
    window_hits = []
    for start in range(ref_len):
        stop = start + n_gram
        matches = torch.sum(
            candidate_sentence[:, start:stop] == reference_sentence[:, start:stop],
            dim=-1,
        )
        # A window counts only when all n positions agree.
        window_hits.append((matches == n_gram).long().detach())
        # Stop once the last full window has been scored (mirrors the
        # original early break; if ref_len < n_gram no window can hit).
        if stop == ref_len:
            break
    hit_matrix = torch.stack(window_hits)
    return hit_matrix.sum(dim=0) / len(hit_matrix)


def bleu(candidate_sentence, reference_sentence):
    """BLEU-style score: geometric mean of positional n-gram precisions.

    Computes ``n_gram_P`` for every n from 1 up to the reference length,
    takes the uniformly weighted geometric mean via ``exp(mean(tlog(P)))``
    per batch row, and averages over the batch.

    NOTE(review): unlike standard BLEU this uses all n up to ref_len
    (not n ≤ 4) and applies no brevity penalty.

    Args:
        candidate_sentence: (batch, cand_len) integer token tensor.
        reference_sentence: (batch, ref_len) integer token tensor.

    Returns:
        Python float: the batch-averaged score.
    """
    max_n = reference_sentence.shape[1]
    precisions = [
        n_gram_P(n, candidate_sentence, reference_sentence)
        for n in range(1, max_n + 1)
    ]
    P = torch.stack(precisions)
    weight = 1 / len(P)  # uniform weight over the n-gram orders
    per_row_score = torch.exp(torch.sum(weight * tlog(P), dim=0))
    return torch.mean(per_row_score).item()


# Example usage
# candidate = torch.tensor([[1, 2, 3, 4, 1, 5, 1, 5],
#                           [1, 2, 3, 4, 1, 5, 1, 5]])
# reference = torch.tensor([[1, 2, 6, 4, 1, 5],
#                           [1, 2, 6, 4, 1, 5]])
# print(bleu(candidate, reference))
