import numpy as np
import torch


def insert_blank(label, blank_id=0):
    """
    Insert a blank token between every two label tokens, plus one at each end.

    Args:
        label (list|np.ndarray|torch.Tensor): label ids, one dimension, shape (n,)
        blank_id (int): blank token id
    Returns:
        np.ndarray: interleaved labels, shape (2n + 1,)
    Example:
        >>> insert_blank([1, 2, 3, 4]).tolist()
        [0, 1, 0, 2, 0, 3, 0, 4, 0]
    """
    label = np.asarray(label).reshape(-1)
    # Interleave columns to build [blank, l0, blank, l1, ..., blank, l(n-1)].
    blanks = np.full((label.shape[0], 1), blank_id, dtype=np.int64)
    label = np.concatenate([blanks, label[:, None]], axis=1).reshape(-1)
    # Append the trailing blank explicitly: the original `label[0]` lookup was
    # equivalent for non-empty input but raised IndexError when n == 0.
    label = np.append(label, blank_id)
    return label


def forced_align(ctc_probs: torch.Tensor,
                 y: torch.Tensor,
                 blank_id=0):
    """CTC forced alignment (Viterbi search over the blank-interleaved lattice).

    Args:
        ctc_probs (torch.Tensor): frame-level scores, 2d tensor (T, D);
            treated as log-probabilities (combined additively) — confirm caller
            passes log-domain values.
        y (torch.Tensor): label id sequence, 1d tensor (L,)
        blank_id (int): blank symbol index
    Returns:
        list: alignment result, one token id per frame (length T)
    Examples:
        >>> y = torch.tensor([29, 3, 8, 8, 7])
        >>> ctc_probs = torch.randn((100, 31))
        >>> print(forced_align(ctc_probs, y))
    """
    y_insert_blank = insert_blank(y, blank_id)  # length bigL = 2L + 1
    num_states = len(y_insert_blank)
    num_frames = ctc_probs.size(0)

    # log_alpha[t, s]: best path score ending in state s at frame t.
    # Cells left at -inf (log of zero) are unreachable.
    log_alpha = torch.full((num_frames, num_states), -float('inf'))
    # state_path[t, s]: predecessor state of the best path into (t, s); -1 = none.
    state_path = torch.full((num_frames, num_states), -1, dtype=torch.int64)

    # Init: a valid path starts in the leading blank or the first real label.
    log_alpha[0, 0] = ctc_probs[0][y_insert_blank[0]]
    if num_states > 1:
        log_alpha[0, 1] = ctc_probs[0][y_insert_blank[1]]

    # Dynamic programming over frames (frame 0 initialized above).
    for t in range(1, num_frames):
        for s in range(num_states):
            # Allowed CTC lattice transitions into state s:
            # - a non-blank state accepts arrows from s, s-1 and s-2
            # - a blank state, s <= 1, or a repeated label (same as s-2)
            #   accepts arrows only from s and s-1 (no skipping the blank
            #   that separates two identical labels)
            if s == 0:
                candidates = torch.stack([log_alpha[t - 1, s]])
                prev_state = [s]
            elif (y_insert_blank[s] == blank_id or s == 1
                  or y_insert_blank[s] == y_insert_blank[s - 2]):
                candidates = torch.stack(
                    [log_alpha[t - 1, s], log_alpha[t - 1, s - 1]])
                prev_state = [s, s - 1]
            else:
                candidates = torch.stack([
                    log_alpha[t - 1, s],
                    log_alpha[t - 1, s - 1],
                    log_alpha[t - 1, s - 2],
                ])
                prev_state = [s, s - 1, s - 2]
            log_alpha[t, s] = torch.max(candidates) + ctc_probs[t][y_insert_blank[s]]
            # Remember which predecessor won so the path can be backtracked.
            state_path[t, s] = prev_state[torch.argmax(candidates)]

    # Backtrack: a complete path must end in the trailing blank or the
    # final real label — pick the better of the two.
    state_seq = -1 * torch.ones((num_frames, 1), dtype=torch.int64)
    if num_states > 1:
        candidates = torch.stack([
            log_alpha[-1, num_states - 1],
            log_alpha[-1, num_states - 2],
        ])
        final_state = [num_states - 1, num_states - 2]
        state_seq[-1] = final_state[torch.argmax(candidates)]
    else:
        state_seq[-1] = 0
    for t in range(num_frames - 2, -1, -1):
        state_seq[t] = state_path[t + 1, state_seq[t + 1, 0]]

    # Map the aligned lattice states back to token ids, one per frame.
    return [y_insert_blank[state_seq[t, 0]] for t in range(num_frames)]




if __name__ == '__main__':
    # Smoke demo: align 100 random score frames to a short label sequence
    # (vocabulary of 30 real tokens + 1 blank => 31 score columns per frame).
    vocab_size = 30
    labels = torch.tensor([29, 3, 8, 8, 7])
    frame_scores = torch.randn((100, vocab_size + 1))
    print(forced_align(frame_scores, labels))
