import torch
from torchcrf import CRF


def crf():
    """Minimal demo of the pytorch-crf CRF layer (pip install pytorch-crf).

    Builds a CRF over 5 tags, scores a random emission batch against a
    fixed tag sequence (with and without a mask), then Viterbi-decodes.
    Everything is printed; nothing is returned.
    """
    num_tags = 5  # size of the tag set
    model = CRF(num_tags, batch_first=True)

    batch_size = 1  # number of samples in the batch
    seq_length = 3  # maximum sequence length in the batch
    emissions = torch.randn(batch_size, seq_length, num_tags)
    print(111, emissions.size())

    # batch_first=True, so tags are (batch_size, seq_length)
    tags = torch.tensor([
        [0, 2, 4],
    ], dtype=torch.long)
    print(tags.size())

    # forward() returns the log-likelihood of `tags` under the model
    llh = model(emissions, tags)
    print(222, llh, llh.size())

    # mask is (batch_size, seq_length); all ones means every position
    # of the single sample is valid (full-length sequence)
    mask = torch.tensor([
        [1, 1, 1],
    ], dtype=torch.uint8)
    masked_llh = model(emissions, tags, mask=mask)
    print(333, masked_llh)

    # Viterbi decode: best tag sequence for each batch element
    best_paths = model.decode(emissions)
    print(444, best_paths)


# Run the CRF demo immediately (executes on import as well as `python file.py`).
crf()


def beam_search_decoder(post, top_k):
    """Beam-search decode the `top_k` most likely state sequences.

    Parameters:
        post (Tensor): decoder output probabilities, shape
            (batch_size, seq_length, vocab_size); each row post[b, t, :]
            is a probability distribution over the vocabulary at step t.
        top_k (int): beam size.

    Returns:
        indices (Tensor): a beam of index sequences, shape
            (batch_size, top_k, seq_length), sorted best-first.
        log_prob (Tensor): log likelihood of each sequence, shape
            (batch_size, top_k), sorted descending.
    """
    batch_size, seq_length, vocab_size = post.shape
    log_post = post.log()

    # Step 0: the top-k single tokens seed the beams.
    log_prob, indices = log_post[:, 0, :].topk(top_k, sorted=True)
    indices = indices.unsqueeze(-1)  # (batch_size, top_k, 1)

    for i in range(1, seq_length):
        # Score every (beam, token) extension: each beam's running log-prob
        # plus the log-prob of each vocabulary entry at step i.
        log_prob = log_prob.unsqueeze(-1) + log_post[:, i, :].unsqueeze(1).repeat(1, top_k, 1)
        # Flatten the top_k * vocab_size candidates and keep the best top_k.
        log_prob, index = log_prob.view(batch_size, -1).topk(top_k, sorted=True)
        # Recover which beam each survivor extends and which token it appends.
        beam_id = torch.div(index, vocab_size, rounding_mode="floor")
        token_id = torch.remainder(index, vocab_size)
        # Bug fix: reorder the stored histories to follow the surviving beams
        # before appending the new tokens. The original code concatenated onto
        # the unpermuted histories, corrupting sequences whenever one beam
        # produced several survivors or the beam order changed.
        indices = torch.gather(
            indices, 1, beam_id.unsqueeze(-1).expand(-1, -1, indices.size(-1))
        )
        indices = torch.cat([indices, token_id.unsqueeze(-1)], dim=-1)
    return indices, log_prob


# Demo: 1 batch, 2 time steps, 5 states; softmax makes each step's row a
# probability distribution over the 5 states, as beam_search_decoder expects.
# (Dead commented-out debug prints removed.)
post = torch.softmax(torch.randn([1, 2, 5]), -1)
indices, log_prob = beam_search_decoder(post, top_k=3)
