import itertools
import math
from typing import List, Tuple, Type

import torch
from torch.nn import GRU, LSTM, RNN
from torch.nn.utils.rnn import pad_sequence

"""
author: gengxuelong
description: Utility functions for transformer
"""

IGNORE_ID = -1


def pad_list(xs: List[torch.Tensor], pad_value: int) -> torch.Tensor:
    """
    Pad a list of tensors into one batched tensor.

    All dimensions except the first must match across the tensors.

    Args:
        xs: list of tensors [(T_1, *), (T_2, *), ...]
        pad_value: value written into the padded positions
    Returns:
        padded tensor (B, Tmax, *), on the dtype/device of xs[0]
    Example:
        >>>a = torch.ones(3, 2)
        >>>b = torch.ones(2, 2)
        >>>c = torch.ones(4, 2)
        >>>print(pad_list([a, b, c], pad_value=-1))
    """
    max_len = max(len(item) for item in xs)
    batch = len(xs)
    # torch.full replaces the original zeros(...) + fill_() two-step.
    pad_res = torch.full((batch, max_len, *xs[0].shape[1:]), pad_value,
                         dtype=xs[0].dtype, device=xs[0].device)
    for i, x in enumerate(xs):
        pad_res[i, :len(x)] = x
    return pad_res


def anti_pad_list(ys: torch.Tensor, pad_value: int) -> Tuple[List[torch.Tensor], List[int]]:
    """
    Undo padding: split a padded tensor back into per-row tensors.

    An item counts as padding when any of its elements equals pad_value
    (same rule as the original per-element Python scan, but vectorized —
    the old nested loops ran O(B*T) Python iterations over tensor items).

    Args:
        ys: padded tensor (B, Tmax, *)
        pad_value: padding value
    Returns:
        list of tensors [(T_1, *), (T_2, *), ...]
        list of lengths [T_1, T_2, ...]
    """
    # Flatten trailing dims so the pad test works for any item shape.
    flat = ys.reshape(ys.shape[0], ys.shape[1], -1)
    is_pad = torch.eq(flat, pad_value).any(dim=-1)  # (B, Tmax) bool
    lens = (~is_pad).sum(dim=1).tolist()            # Python ints, as before
    return [row[:lens[i]] for i, row in enumerate(ys)], lens


def add_blank(ys: torch.Tensor, ignore_id: int, blank_id: int) -> torch.Tensor:
    """
    Prepend a blank item to every row and replace ignore_id with blank_id.

    A blank column is inserted at the front of the time dimension, then any
    remaining ignore_id entries are overwritten with blank_id.

    Args:
        ys: tensor (B, Lmax, *)
        ignore_id: id marking padded/ignored positions
        blank_id: id of the blank token
    Returns:
        tensor (B, Lmax+1, *)
    Examples:
        >>>a = torch.ones(3, )
        >>>b = torch.ones(2, )
        >>>c = torch.ones(4, )
        >>>pad = pad_list([a, b, c], pad_value=-1)
        >>>print(pad)
        >>>print(add_blank(pad, IGNORE_ID, 0))
    """
    blank_col = torch.full((ys.shape[0], 1, *ys.shape[2:]), blank_id,
                           dtype=ys.dtype, device=ys.device)
    merged = torch.cat((blank_col, ys), dim=1)
    return merged.masked_fill(merged == ignore_id, blank_id)


def add_sos_eos(ys: torch.Tensor, sos_id: int, eos_id: int,
                ignore_id: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Build decoder input/target pairs by adding sos and eos.

    Args:
        ys: tensor (B, Lmax, *), padded with ignore_id
        sos_id: start-of-sequence id
        eos_id: end-of-sequence id
        ignore_id: pad id
    Returns:
        in_tensor (B, Lmax+1, *): sos-prefixed, padded with eos_id
        out_tensor (B, Lmax+1, *): eos-suffixed, padded with ignore_id
    Examples:
        >>>a = torch.tensor([1, 2], dtype=torch.long)
        >>>b = torch.tensor([1, 2, 3, 4, 5], dtype=torch.long)
        >>>c = torch.tensor([1, 2, 3], dtype=torch.long)
        >>>ys_pad = pad_sequence([a, b, c], batch_first=True, padding_value=-1)
        >>>print(ys_pad)
        >>>print(add_sos_eos(ys_pad, 10, 11, -1))
    """
    sos = torch.ones(1, *(ys.shape[2:]), dtype=ys.dtype, device=ys.device) * sos_id
    eos = torch.ones(1, *(ys.shape[2:]), dtype=ys.dtype, device=ys.device) * eos_id
    ys_rows, _ = anti_pad_list(ys, ignore_id)  # strip the ignore_id padding
    ys_sos = [torch.cat([sos, row], dim=0) for row in ys_rows]
    ys_eos = [torch.cat([row, eos], dim=0) for row in ys_rows]
    ys_pad_sos = pad_list(ys_sos, pad_value=eos_id)
    # Target padding was hard-coded to -1; use the ignore_id parameter so the
    # result is correct for any pad id (the docstring already promised this).
    ys_pad_eos = pad_list(ys_eos, pad_value=ignore_id)
    return ys_pad_sos, ys_pad_eos


def reverse_pad_list(ys: torch.Tensor, ys_lens: torch.Tensor | List,  pad_value: float):
    """
    Reverse the first ys_lens[i] items of each row, then re-pad.

    Args:
        ys: tensor (B, Lmax, *)
        ys_lens: valid length of each row, tensor (B,) or list
        pad_value: value written into the padded positions
    Returns:
        tensor (B, max(ys_lens), *)
    Examples:
        >>>a = torch.tensor([1, 2], dtype=torch.long)
        >>>b = torch.tensor([1, 2, 3, 4, 5], dtype=torch.long)
        >>>c = torch.tensor([1, 2, 3], dtype=torch.long)
        >>>ys_pad = pad_sequence([a, b, c], batch_first=True, padding_value=-1)
        >>>print(ys_pad)
        >>>print(reverse_pad_list(ys_pad, pad_value=-1, ys_lens=[2, 5, 3]))
    """
    flipped_rows = [row[:ys_lens[idx]].flip(0) for idx, row in enumerate(ys)]
    return pad_sequence(flipped_rows, batch_first=True, padding_value=pad_value)


def th_accuracy(pad_outputs: torch.Tensor, pad_targets: torch.Tensor, ignore_label: int):
    """
    Token-level accuracy over the non-ignored target positions.

    Args:
        pad_outputs: logits, tensor (B * Lmax, D)
        pad_targets: target ids, tensor (B, Lmax)
        ignore_label: target id excluded from the statistic
    Returns:
        accuracy as a Python float (nan if every position is ignored,
        since the denominator is then zero)
    Examples:
        >>>targets = torch.tensor([[0, 1, -1]])
        >>>logits = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]])
        >>>print(th_accuracy(logits, targets, ignore_label=-1))
    """
    # The original Examples block was copy-pasted from reverse_pad_list;
    # replaced with one that actually exercises this function.
    pad_outputs = pad_outputs.view(pad_targets.shape[0], pad_targets.shape[1], pad_outputs.shape[-1])
    predict_ids = torch.argmax(pad_outputs, dim=-1)
    mask = pad_targets != ignore_label  # True where the target counts
    is_right = (predict_ids[mask] == pad_targets[mask])
    accuracy = torch.sum(is_right).float() / mask.sum().float()
    return accuracy.item()


def get_rnn(rnn_type: str) -> Type[RNN | LSTM | GRU]:
    """
    get rnn module
    """
    assert rnn_type in ["rnn", "lstm", "gru"]
    if rnn_type == "rnn":
        return torch.nn.RNN
    elif rnn_type == "lstm":
        return torch.nn.LSTM
    else:
        return torch.nn.GRU


def get_activation(act):
    """Instantiate and return the activation module named by *act*."""
    act_classes = {
        "hardtanh": torch.nn.Hardtanh,
        "tanh": torch.nn.Tanh,
        "relu": torch.nn.ReLU,
        "selu": torch.nn.SELU,
        "swish": torch.nn.SiLU,  # swish is torch's SiLU
        "gelu": torch.nn.GELU,
    }
    act_class = act_classes[act]
    return act_class()


def get_subsample(config):
    """
    Return the encoder's downsampling factor, looked up from the
    input-layer name in config["encoder_conf"]["input_layer"].
    """
    input_layer = config["encoder_conf"]["input_layer"]
    factors = {"conv2d": 4, "conv2d6": 6, "conv2d8": 8}
    assert input_layer in factors
    return factors[input_layer]


def remove_duplicates_and_blank(hyp: List[int]) -> List[int]:
    """
    Collapse consecutive duplicate ids and drop ids equal to 0.

    Args:
        hyp: list of token ids, possibly with repeats and 0s
    Returns:
        list of ids with each run of equal values reduced to one element,
        and all 0s removed
    Example:
        >>>a = [1, 1, 1, 2, 3, 2, 4, 3, 0, 4, 3, 3, 3]
        >>>print(remove_duplicates_and_blank(a))
    """
    # itertools.groupby yields one representative per run of equal ids,
    # replacing the hand-rolled two-pointer scan of the original.
    return [token for token, _ in itertools.groupby(hyp) if token != 0]


def log_add(args: List[float]) -> float:
    """
    Numerically stable log(sum(exp(x) for x in args)).

    Subtracting the max before exponentiating avoids overflow; the all -inf
    case is handled separately because max/exp would produce nan there.
    (Annotation fixed: the inputs are log-domain floats, not ints.)

    Args:
        args: log-domain values, may include -inf
    Returns:
        log of the summed probabilities, as a float
    """
    if all(x == -float('inf') for x in args):
        return -float('inf')
    max_val = max(args)
    lsp = math.log(sum(math.exp(x - max_val) for x in args))
    return max_val + lsp


if __name__ == '__main__':
    # Quick manual smoke test.
    # NOTE(review): `a` is exactly the docstring example for
    # remove_duplicates_and_blank, yet log_add is called on it; the intended
    # call may have been remove_duplicates_and_blank(a) — confirm.
    a = [1, 1, 1, 2, 3, 2, 4, 3, 0, 4, 3, 3, 3]
    print(log_add(a))
