#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/3/30
# @Author  : geekhch
# @Email   : geekhch@qq.com
# @Desc    : this file is rewritten from TensorFlow Addons; refer to
# https://github.com/tensorflow/addons/blob/4dd4b3f01c3ab4fe04c11a8515291a6502684145/tensorflow_addons/text/crf.py#L131

from torch import (Tensor, )
import torch
from utils import device


def crf_hidden_score(tag_indices, sequence_lengths, inputs):
    '''
    Computes the summed unary (emission) score of a tag sequence.
    :param tag_indices: A [batch_size, max_seq_len] matrix of tag indices.
    :param sequence_lengths: A [batch_size] vector of true sequence lengths.
    :param inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials.
    :return: A [batch_size] vector of unary scores, padding positions excluded.
    '''
    max_seq_len = inputs.shape[1]

    tag_indices = tag_indices.type(torch.int64)
    sequence_lengths = sequence_lengths.type(torch.int)

    # Pick the potential of the chosen tag at every position.
    # squeeze(-1), not squeeze(): a bare squeeze would also drop the batch
    # dimension when batch_size == 1.
    hidden_scores = inputs.gather(2, tag_indices.unsqueeze(-1)).squeeze(-1)  # [batch_size, max_seq_len]

    # Build the padding mask on the same device as the scores so the function
    # works for both CPU and GPU tensors (the original CPU arange broke on GPU).
    score_indices = torch.arange(0, max_seq_len, device=hidden_scores.device).view(1, max_seq_len).expand(hidden_scores.shape)
    mask = score_indices < sequence_lengths.unsqueeze(-1).to(hidden_scores.device)  # [batch_size, max_seq_len]

    # Zero out scores beyond each sequence's true length.
    hidden_scores = torch.where(mask, hidden_scores, torch.zeros_like(hidden_scores))

    return hidden_scores.sum(dim=1)  # [batch_size]


def crf_transition_score(tag_indices, sequence_lengths, transition_matrix):
    '''
    Computes the summed binary (transition) score of a tag sequence.
    :param tag_indices: A [batch_size, max_seq_len] matrix of tag indices.
    :param sequence_lengths: A [batch_size] vector of true sequence lengths.
    :param transition_matrix: A [num_tags, num_tags] transition matrix.
    :return: A [batch_size] vector of transition scores, padding excluded.
    '''
    max_seq_len = tag_indices.shape[1]
    tag_indices = tag_indices.type(torch.int64)
    sequence_lengths = sequence_lengths.type(torch.int)

    # Adjacent tag pairs: (t, t+1) for every step.
    source_tags = tag_indices[:, :-1]
    dest_tags = tag_indices[:, 1:]
    transition_scores = transition_matrix[source_tags, dest_tags]  # [batch_size, max_seq_len-1]

    # Mask on the scores' device so CPU and GPU inputs both work
    # (the original CPU arange mismatched GPU sequence_lengths).
    score_indices = torch.arange(0, max_seq_len - 1, device=transition_scores.device).view(1, -1).expand(transition_scores.shape)
    # A sequence of length L has L-1 transitions, so keep indices <= L-2.
    mask = score_indices <= (sequence_lengths.unsqueeze(-1) - 2).to(transition_scores.device)
    transition_scores = torch.where(mask, transition_scores, torch.zeros_like(transition_scores))

    return transition_scores.sum(dim=1)



def crf_sequence_score(inputs, tag_indices, sequence_lengths, transition_matrix):
    '''
    Computes the unnormalized score for a tag sequence.
    :param inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    :param tag_indices: A [batch_size, max_seq_len] matrix of tag indices for which
        we compute the log-likelihood.
    :param sequence_lengths: A [batch_size] vector of true sequence lengths.
    :param transition_matrix: A [num_tags, num_tags] transition matrix
    :return: sequence_scores: A [batch_size] vector of unnormalized sequence scores.
    '''
    tag_indices = tag_indices.type(torch.int64)
    sequence_lengths = sequence_lengths.type(torch.int)

    def _single_seq_fn():
        # max_seq_len == 1: the score is just the unary potential of the one tag.
        batch_size = inputs.shape[0]
        # Index tensors must be int64 for advanced indexing; build on the
        # inputs' device so GPU tensors work too.
        batch_inds = torch.arange(0, batch_size, dtype=torch.int64, device=inputs.device).view(batch_size, 1)
        sequence_scores = inputs[batch_inds, 0, tag_indices]  # [batch_size, 1]
        # Sequences with length < 1 contribute a score of zero.
        sequence_scores[sequence_lengths < 1] = 0
        # squeeze(-1), not squeeze(): keeps the batch dim when batch_size == 1.
        return sequence_scores.squeeze(-1)

    def _multi_seq_fn():
        # Unary (emission) plus binary (transition) scores of the given sequence.
        hidden_scores = crf_hidden_score(tag_indices, sequence_lengths, inputs)
        transition_scores = crf_transition_score(tag_indices, sequence_lengths, transition_matrix)
        return hidden_scores + transition_scores

    return _single_seq_fn() if inputs.shape[1] == 1 else _multi_seq_fn()


def crf_forward(inputs, state, transition_matrix, sequence_lengths) -> Tensor:
    """Computes the alpha values in a linear-chain CRF.
    See http://www.cs.columbia.edu/~mcollins/fb.pdf for reference.
    Args:
      inputs: A [batch_size, max_len-1, num_tags] matrix of unary potentials
          (all time steps after the first).
      state: A [batch_size, num_tags] matrix containing the previous alpha
          values (the unary potentials of the first time step).
      transition_matrix: A [num_tags, num_tags] matrix of binary potentials.
          This matrix is expanded into a [1, num_tags, num_tags] in preparation
          for the broadcast summation occurring within the cell.
      sequence_lengths: A [batch_size] vector of true sequence lengths.
    Returns:
      new_alphas: A [batch_size, num_tags] matrix containing the alpha values
          at each sequence's last valid time step.
    """
    sequence_lengths = sequence_lengths.type(torch.long)
    batch_size = sequence_lengths.shape[0]
    # Index of each sequence's last valid step; empty sequences fall back to 0.
    last_index = torch.clamp(sequence_lengths - 1, min=0)

    inputs = inputs.transpose(0, 1)  # [max_len-1, batch_size, num_tags]
    transition_matrix = transition_matrix.unsqueeze(0)  # [1, num_tags, num_tags]

    def _scan_fn(_state, _inputs):
        '''
        One step of the forward recurrence.
        :param _state:  [batch_size, num_tags] previous alphas
        :param _inputs: [batch_size, num_tags] unary potentials of this step
        :return: [batch_size, num_tags] new alphas
        '''
        _state = _state.unsqueeze(2)  # [batch_size, num_tags, 1]
        transition_scores = _state + transition_matrix  # [batch_size, num_tags, num_tags]
        # logsumexp over the previous-tag axis.
        return _inputs + torch.logsumexp(transition_scores, 1)

    # Allocate on the same device/dtype as `state` so the function works for
    # CPU and GPU tensors alike (the original hard-coded the global device,
    # which broke torch.cat when `state` lived elsewhere).
    all_alphas = torch.empty(inputs.shape, dtype=state.dtype, device=state.device)
    all_alphas = torch.cat((state.unsqueeze(0), all_alphas))  # [max_len, batch, num_tags]
    for i in range(inputs.shape[0]):
        all_alphas[i + 1] = _scan_fn(all_alphas[i], inputs[i])
    all_alphas = all_alphas.transpose(0, 1)  # [batch_size, max_len, num_tags]

    # Select each sequence's alphas at its last valid step.
    return all_alphas[torch.arange(batch_size, device=state.device), last_index]


def crf_log_norm(inputs, sequence_lengths, transition_matrix):
    '''
    Computes the normalization term (log partition function) for a CRF.
    :param inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials.
    :param sequence_lengths: A [batch_size] vector of true sequence lengths.
    :param transition_matrix: A [num_tags, num_tags] transition matrix.
    :return: A [batch_size] vector of log-normalizers (0 for length <= 0).
    '''
    sequence_lengths = sequence_lengths.type(torch.int)
    # First time step. [:, 0, :] already yields [batch_size, num_tags]; the
    # original's extra squeeze() wrongly dropped the batch dim when
    # batch_size == 1, breaking the logsumexp over dim 1 below.
    first_input = inputs[:, 0, :]

    def _mask_short(log_norm):
        # Zero out log_norm for sequences with length <= 0; compare on the
        # same device as the scores so CPU and GPU tensors both work.
        return torch.where(
            torch.le(sequence_lengths, 0).to(log_norm.device),
            torch.zeros_like(log_norm), log_norm
        )

    def _single_seq_fn():
        # max_seq_len == 1: reduce over the unary potentials only.
        return _mask_short(torch.logsumexp(first_input, 1))

    def _multi_seq_fn():
        """Forward computation of alpha values to get the partition function."""
        rest_of_input = inputs[:, 1:, :]
        alphas = crf_forward(
            rest_of_input, first_input, transition_matrix, sequence_lengths
        )
        return _mask_short(torch.logsumexp(alphas, 1))

    return _single_seq_fn() if inputs.shape[1] == 1 else _multi_seq_fn()


def crf_neg_log_likelihood(
        inputs: Tensor,
        tag_indices: Tensor,
        sequence_lengths: Tensor,
        transition_matrix: Tensor
        ) -> (Tensor, Tensor):
    '''
    Computes the negative log-likelihood of tag sequences in a CRF.
    :param inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    :param tag_indices: A [batch_size, max_seq_len] matrix of tag indices for which
        we compute the log-likelihood.
    :param sequence_lengths: A [batch_size] vector of true sequence lengths.
    :param transition_matrix: A [num_tags, num_tags] transition matrix
    :return:
        log_likelihood: A [batch_size] `Tensor` containing the negative
            log-likelihood of each example, given the sequence of tag indices.
        transition_matrix: The [num_tags, num_tags] transition matrix that was
            passed in (returned for API parity with the TensorFlow original).
    '''
    tag_indices = tag_indices.type(torch.int64)
    sequence_lengths = sequence_lengths.type(torch.int)

    # Unnormalized score of the given tag path.
    sequence_scores = crf_sequence_score(
        inputs, tag_indices, sequence_lengths, transition_matrix
    )
    # Log partition function over all possible paths.
    log_norm = crf_log_norm(inputs, sequence_lengths, transition_matrix)

    # NLL = log Z - score(y); already negated, suitable as a loss directly.
    log_likelihood = log_norm - sequence_scores
    return log_likelihood, transition_matrix

def viterbi_decode(inputs: Tensor, transition_matrix: Tensor):
    """Decode the highest scoring sequence of tags outside of TensorFlow.
    This should only be used at test time.
    Args:
      inputs: A [seq_len, num_tags] matrix of unary potentials.
      transition_matrix: A [num_tags, num_tags] matrix of binary potentials.
    Returns:
      viterbi: A [seq_len] list of integers containing the highest scoring tag
          indices.
      viterbi_score: A float containing the score for the Viterbi sequence.
    """
    seq_len = inputs.shape[0]
    # scores[t, j]: best score of any path ending in tag j at step t.
    scores = torch.zeros_like(inputs)
    # history[t, j]: tag at step t-1 on that best path.
    history = torch.zeros_like(inputs, dtype=torch.int32)
    scores[0] = inputs[0]

    for step in range(1, seq_len):
        # candidate[i, j] = best score through tag i at step-1, moving to tag j.
        candidate = scores[step - 1].unsqueeze(1) + transition_matrix  # [num_tags, num_tags]
        best_prev, _ = torch.max(candidate, 0)
        scores[step] = inputs[step] + best_prev
        history[step] = torch.argmax(candidate, 0)

    # Backtrack from the best final tag to recover the path.
    best_path = [torch.argmax(scores[seq_len - 1]).item()]
    for step in range(seq_len - 1, 0, -1):
        best_path.append(history[step][best_path[-1]].item())
    best_path.reverse()

    best_score = scores[seq_len - 1].max()
    return best_path, best_score