"""ESPnet2 ASR Transducer model."""

import logging
import sys
from contextlib import contextmanager
from typing import Dict, List, Optional, Tuple, Union

import torch

from packaging.version import parse as V
from typeguard import typechecked

from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.asr.specaug.abs_specaug import AbsSpecAug
from espnet2.asr_transducer.decoder.abs_decoder import AbsDecoder
from espnet2.asr_transducer.encoder.encoder import Encoder
from espnet2.asr_transducer.joint_network import JointNetwork, JointNetworkContainer
from espnet2.asr_transducer.utils import get_transducer_task_io
from espnet2.asr_transducer.error_calculator import ErrorCalculator
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.train.abs_espnet_model import AbsESPnetModel

if V(torch.__version__) >= V("1.6.0"):
    from torch.amp import autocast
else:

    @contextmanager
    def autocast(enabled=True):
        yield


class ESPnetASRMTDTModel(AbsESPnetModel):
    """ESPnet2ASRTransducerModel module definition.

    Args:
        vocab_size: Size of complete vocabulary (w/ SOS/EOS and blank included).
        token_list: List of tokens in vocabulary (minus reserved tokens).
        frontend: Frontend module.
        specaug: SpecAugment module.
        normalize: Normalization module.
        encoder: Encoder module.
        decoder: Decoder module.
        joint_network: Joint Network module.
        transducer_weight: Weight of the Transducer loss.
        use_k2_pruned_loss: Whether to use k2 pruned Transducer loss.
        k2_pruned_loss_args: Arguments of the k2 loss pruned Transducer loss.
        warmup_steps: Number of steps in warmup, used for pruned loss scaling.
        validation_nstep: Maximum number of symbol expansions at each time step
                          when reporting CER or/and WER using mAES.
        fastemit_lambda: FastEmit lambda value.
        auxiliary_ctc_weight: Weight of auxiliary CTC loss.
        auxiliary_ctc_dropout_rate: Dropout rate for auxiliary CTC loss inputs.
        auxiliary_lm_loss_weight: Weight of auxiliary LM loss.
        auxiliary_lm_loss_smoothing: Smoothing rate for LM loss' label smoothing.
        ignore_id: Initial padding ID.
        sym_space: Space symbol.
        sym_blank: Blank Symbol.
        report_cer: Whether to report Character Error Rate during validation.
        report_wer: Whether to report Word Error Rate during validation.
        extract_feats_in_collect_stats: Whether to use extract_feats stats collection.

    """

    @typechecked
    def __init__(
        self,
        vocab_size: int,
        token_list: Union[Tuple[str, ...], List[str]],
        frontend: Optional[AbsFrontend],
        specaug: Optional[AbsSpecAug],
        normalize: Optional[AbsNormalize],
        encoder: Encoder,
        decoder: AbsDecoder,
        joint_network_container: JointNetworkContainer,
        use_k2_pruned_loss: bool = False,
        k2_pruned_loss_args: Optional[Dict] = None,
        warmup_steps: int = 25000,
        validation_nstep: int = 2,
        fastemit_lambda: float = 0.0,
        auxiliary_ctc_weight: float = 0.0,
        auxiliary_ctc_dropout_rate: float = 0.0,
        auxiliary_lm_loss_weight: float = 0.0,
        auxiliary_lm_loss_smoothing: float = 0.05,
        ignore_id: int = -1,
        sym_space: str = "<space>",
        sym_blank: str = "<blank>",
        report_cer: bool = False,
        report_wer: bool = False,
        extract_feats_in_collect_stats: bool = True,
        blank_id: int = 0,
        clamp: float = -1.0,
        # MTP related
        max_duration: int = 15,
        mtp_weight: Optional[List] = None,
        mdp_weight: Optional[List] = None,
        pred_num: int = 1,
    ) -> None:
        """Construct an ESPnetASRMTDTModel object.

        See the class docstring for the meaning of each argument. ``None`` for
        ``k2_pruned_loss_args`` / ``mtp_weight`` / ``mdp_weight`` is treated as
        an empty dict/list (these were previously mutable default arguments).
        """
        super().__init__()

        # Fix: avoid shared mutable default arguments ({} / []).
        k2_pruned_loss_args = (
            {} if k2_pruned_loss_args is None else k2_pruned_loss_args
        )
        mtp_weight = [] if mtp_weight is None else list(mtp_weight)
        mdp_weight = [] if mdp_weight is None else list(mdp_weight)

        # Pad missing per-head weights with 0.0 so forward() can always index
        # weight[i] for every prediction head.
        mtp_weight += [0.0] * max(0, pred_num - len(mtp_weight))
        mdp_weight += [0.0] * max(0, pred_num - len(mdp_weight))

        # The following labels ID are reserved:
        #    - 0: Blank symbol.
        #    - 1: Unknown symbol.
        #    - vocab_size - 1: SOS/EOS symbol.
        self.vocab_size = vocab_size
        self.ignore_id = ignore_id
        self.token_list = token_list.copy()

        self.sym_space = sym_space
        self.sym_blank = sym_blank

        self.frontend = frontend
        self.specaug = specaug
        self.normalize = normalize

        self.encoder = encoder
        self.decoder = decoder
        self.joint_network_container = joint_network_container

        # Built lazily on first use (see forward / _calc_transducer_loss).
        self.criterion_transducer = None
        self.error_calculator = None

        self.use_auxiliary_ctc = auxiliary_ctc_weight > 0
        self.use_auxiliary_lm_loss = auxiliary_lm_loss_weight > 0

        if use_k2_pruned_loss:
            # Projections used by the k2 "simple" loss (AM/LM streams).
            self.am_proj = torch.nn.Linear(
                encoder.output_size,
                vocab_size,
            )

            self.lm_proj = torch.nn.Linear(
                decoder.output_size,
                vocab_size,
            )

            self.warmup_steps = warmup_steps
            self.steps_num = -1

            self.k2_pruned_loss_args = k2_pruned_loss_args
            self.k2_loss_type = k2_pruned_loss_args.get("loss_type", "regular")

        self.use_k2_pruned_loss = use_k2_pruned_loss

        if self.use_auxiliary_ctc:
            # NOTE(review): this auxiliary CTC head outputs duration classes
            # (max_duration + 1), not vocabulary tokens — confirm intent.
            self.ctc_lin = torch.nn.Linear(encoder.output_size, max_duration + 1)
            self.ctc_dropout_rate = auxiliary_ctc_dropout_rate

        if self.use_auxiliary_lm_loss:
            self.lm_lin = torch.nn.Linear(decoder.output_size, vocab_size)

            # Label-smoothing constants for the auxiliary LM loss.
            eps = auxiliary_lm_loss_smoothing / (vocab_size - 1)

            self.lm_loss_smooth_neg = eps
            self.lm_loss_smooth_pos = (1 - auxiliary_lm_loss_smoothing) + eps

        self.fastemit_lambda = fastemit_lambda

        self.auxiliary_ctc_weight = auxiliary_ctc_weight
        self.auxiliary_lm_loss_weight = auxiliary_lm_loss_weight

        self.report_cer = report_cer
        self.report_wer = report_wer
        self.validation_nstep = validation_nstep

        self.extract_feats_in_collect_stats = extract_feats_in_collect_stats

        self.blank_id = blank_id
        self.clamp = clamp

        # MTP related
        self.pred_num = pred_num
        self.mtp_weight = mtp_weight
        self.mdp_weight = mdp_weight
        self.max_duration = max_duration

        logging.info(f"pred_num:{self.pred_num}")
        logging.info(f"mtp_weight:{self.mtp_weight}")
        logging.info(f"mdp_weight:{self.mdp_weight}")
        logging.info(f"max_duration:{self.max_duration}")

    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        duration: torch.Tensor,
        duration_lengths: torch.Tensor,
        **kwargs,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Forward architecture and compute loss(es).

        Args:
            speech: Speech sequences. (B, S)
            speech_lengths: Speech sequences lengths. (B,)
            text: Label ID sequences. (B, L)
            text_lengths: Label ID sequences lengths. (B,)
            duration: Per-token duration label sequences, aligned with text. (B, L)
            duration_lengths: Duration sequences lengths. (B,)
            kwargs: Contains "utts_id".

        Return:
            loss: Main loss value.
            stats: Task statistics.
            weight: Task weights.

        """
        assert text_lengths.dim() == 1, text_lengths.shape
        assert (
            speech.shape[0]
            == speech_lengths.shape[0]
            == text.shape[0]
            == text_lengths.shape[0]
        ), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
        assert text.shape[1] == duration.shape[1], (text.shape, duration.shape)

        batch_size = speech.shape[0]

        # For data-parallel: strip padding beyond the longest label sequence.
        # Fix: duration is now trimmed alongside text to keep both aligned.
        text = text[:, : text_lengths.max()]
        duration = duration[:, : text_lengths.max()]

        # 1. Encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

        # 2. Transducer-related I/O preparation
        decoder_in, target, t_len, u_len = get_transducer_task_io(
            text,
            encoder_out_lens,
            ignore_id=self.ignore_id,
            blank_id=self.blank_id,
        )

        # 3. Decoder
        self.decoder.set_device(encoder_out.device)
        decoder_out = self.decoder(decoder_in)

        # 4. Joint Network and Transducer loss computation.
        #    Head i predicts the label stream shifted by i tokens, hence the
        #    [:, i:] slices and the (u_len - i) label lengths below.
        mtp_loss = []
        mdp_loss = []
        if self.use_k2_pruned_loss:
            # Multi-token prediction losses (one pruned loss per head).
            for i in range(self.pred_num):
                mtp_loss.append(
                    self._calc_k2_transducer_pruned_loss(
                        encoder_out,
                        decoder_out[:, i:, :].contiguous(),
                        text[:, i:].contiguous(),
                        t_len,
                        u_len - i,
                        self.joint_network_container.joint_networks[i],
                        **self.k2_pruned_loss_args,
                    )
                )
            logging.debug(f"mtp_loss:{mtp_loss}")

            # Multi-duration prediction losses.
            # Fix: these were appended to mtp_loss, leaving mdp_loss empty and
            # crashing the weighted sum below with an IndexError.
            for i in range(self.pred_num):
                mdp_loss.append(
                    self._calc_k2_transducer_pruned_loss(
                        encoder_out,
                        decoder_out[:, i:, :].contiguous(),
                        duration[:, i:].contiguous(),
                        t_len,
                        u_len - i,
                        self.joint_network_container.joint_networks[i],
                        **self.k2_pruned_loss_args,
                    )
                )
            logging.debug(f"mdp_loss:{mdp_loss}")
        else:
            joint_outs = self.joint_network_container(
                encoder_out.unsqueeze(2), decoder_out.unsqueeze(1)
            )

            # Per head: shifted label/duration targets plus the matching slices
            # of the joint output (token logits first, duration logits after).
            u_lens = []
            targets = []
            d_targets = []
            token_outs = []
            duration_outs = []
            for i in range(self.pred_num):
                u_lens.append(u_len - i)
                targets.append(target[:, i:].contiguous())
                # Fix: d_targets was never populated, so any mdp_weight > 0
                # raised an IndexError below.
                # NOTE(review): assumes duration uses the same padding scheme
                # as target — confirm against the data loader.
                d_targets.append(duration[:, i:].contiguous().int())
                token_outs.append(
                    joint_outs[i][:, :, i:, : self.vocab_size].contiguous().float()
                )
                duration_outs.append(
                    joint_outs[i][:, :, i:, self.vocab_size :].contiguous().float()
                )

            # MTP loss computation
            if any(weight > 0.0 for weight in self.mtp_weight):
                from warp_rnnt import rnnt_loss

                for i in range(self.pred_num):
                    mtp_loss.append(
                        rnnt_loss(
                            log_probs=token_outs[i],
                            labels=targets[i],
                            frames_lengths=t_len,
                            labels_lengths=u_lens[i],
                            reduction="mean",
                            blank=self.blank_id,
                            # NOTE(review): hard-coded 0.1 here while the MDP
                            # loss below uses self.fastemit_lambda — confirm
                            # this asymmetry is intended.
                            fastemit_lambda=0.1,
                        ).squeeze()
                    )
            else:
                mtp_loss = [0.0] * self.pred_num

            # MDP loss computation
            if any(weight > 0.0 for weight in self.mdp_weight):
                from warp_rnnt import rnnt_loss

                for i in range(self.pred_num):
                    mdp_loss.append(
                        rnnt_loss(
                            log_probs=duration_outs[i],
                            labels=d_targets[i],
                            frames_lengths=t_len,
                            labels_lengths=u_lens[i],
                            reduction="mean",
                            blank=self.blank_id,
                            fastemit_lambda=self.fastemit_lambda,
                        ).squeeze()
                    )
            else:
                mdp_loss = [0.0] * self.pred_num

        # 5. Auxiliary losses
        loss_ctc, loss_lm = 0.0, 0.0

        if self.use_auxiliary_lm_loss:
            loss_lm = self._calc_lm_loss(decoder_out, target)

        # 6. Weighted sum of the per-head losses.
        sum_mtp_loss, sum_mdp_loss = 0.0, 0.0
        for i in range(self.pred_num):
            sum_mtp_loss += self.mtp_weight[i] * mtp_loss[i]
            sum_mdp_loss += self.mdp_weight[i] * mdp_loss[i]

        loss = sum_mtp_loss + sum_mdp_loss
        if not torch.is_tensor(loss):
            # All loss weights are zero: keep the tensor interface intact so
            # .detach()/.device below do not fail.
            loss = encoder_out.new_tensor(loss)

        # 7. CER/WER computation (validation only).
        cer_transducer, per = None, None
        if (self.report_cer or self.report_wer) and not self.training:
            if self.use_k2_pruned_loss and self.k2_loss_type == "modified":
                self.validation_nstep = 1

            # Lazily build the error calculator on the first validation batch.
            if self.error_calculator is None:
                self.error_calculator = ErrorCalculator(
                    self.decoder,
                    self.joint_network_container.joint_networks[0],
                    self.token_list,
                    self.sym_space,
                    self.sym_blank,
                    beam_size=2,
                    nstep=self.validation_nstep,
                    expansion_beta=2,
                    report_cer=self.report_cer,
                    report_wer=self.report_wer,
                )

            # Fix: the computation was nested inside the lazy-init branch, so
            # CER/PER was only reported for the first validation batch.
            cer_transducer, per = self.error_calculator(
                encoder_out, target, t_len, False
            )

        stats = dict(
            loss=loss.detach(),
            loss_mtp_sum=(
                sum_mtp_loss.detach() if torch.is_tensor(sum_mtp_loss) else None
            ),
            loss_mdp_sum=(
                sum_mdp_loss.detach() if torch.is_tensor(sum_mdp_loss) else None
            ),
            loss_mtp0=mtp_loss[0].detach() if torch.is_tensor(mtp_loss[0]) else None,
            # Fix: mtp_loss[1] was indexed unconditionally and crashed when
            # pred_num == 1.
            loss_mtp1=(
                mtp_loss[1].detach()
                if self.pred_num > 1 and torch.is_tensor(mtp_loss[1])
                else None
            ),
            loss_aux_ctc=loss_ctc.detach() if torch.is_tensor(loss_ctc) else None,
            loss_aux_lm=loss_lm.detach() if torch.is_tensor(loss_lm) else None,
            cer_transducer=cer_transducer,
            per=per,
        )
        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)

        return loss, stats, weight

    def collect_feats(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Collect features sequences and features lengths sequences.

        Args:
            speech: Speech sequences. (B, S)
            speech_lengths: Speech sequences lengths. (B,)
            text: Label ID sequences. (B, L)
            text_lengths: Label ID sequences lengths. (B,)
            kwargs: Contains "utts_id".

        Return:
            {}: "feats": Features sequences. (B, T, D_feats),
                "feats_lengths": Features sequences lengths. (B,)

        """
        if self.extract_feats_in_collect_stats:
            feats, feats_lengths = self._extract_feats(speech, speech_lengths)
        else:
            # Generate dummy stats if extract_feats_in_collect_stats is False
            logging.warning(
                "Generating dummy stats for feats and feats_lengths, "
                "because encoder_conf.extract_feats_in_collect_stats is "
                f"{self.extract_feats_in_collect_stats}"
            )

            feats, feats_lengths = speech, speech_lengths

        return {"feats": feats, "feats_lengths": feats_lengths}

    def encode(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encoder speech sequences.

        Args:
            speech: Speech sequences. (B, S)
            speech_lengths: Speech sequences lengths. (B,)

        Return:
            encoder_out: Encoder outputs. (B, T, D_enc)
            encoder_out_lens: Encoder outputs lengths. (B,)

        """
        with autocast(device_type="cuda", enabled=False):

            # logging.info(f"raw speech: {speech.size()}")
            # logging.info(f"raw speech_lengths: {speech_lengths.size()}")
            # 1. Extract feats
            feats, feats_lengths = self._extract_feats(speech, speech_lengths)
            # logging.info(f"extract feats: {feats.size()}")
            # logging.info(f"extract feats: {feats_lengths}")

            # 2. Data augmentation
            if self.specaug is not None and self.training:
                feats, feats_lengths = self.specaug(feats, feats_lengths)
                # logging.info(f"specaug: {feats.size()}")
                # logging.info(f"specaug: {feats_lengths}")

            # 3. Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
            if self.normalize is not None:
                feats, feats_lengths = self.normalize(feats, feats_lengths)
                # logging.info(f"normalize: {feats.size()}")
                # logging.info(f"normalize: {feats_lengths}")

        # 4. Forward encoder
        encoder_out, encoder_out_lens = self.encoder(feats, feats_lengths)
        
        # logging.info(f"encoder: {encoder_out.size()}")
        # logging.info(f"encoder: {encoder_out_lens}")

        assert encoder_out.size(0) == speech.size(0), (
            encoder_out.size(),
            speech.size(0),
        )
        assert encoder_out.size(1) <= encoder_out_lens.max(), (
            encoder_out.size(),
            encoder_out_lens.max(),
        )

        return encoder_out, encoder_out_lens

    def _extract_feats(
        self, speech: torch.Tensor, speech_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Extract features sequences and features sequences lengths.

        Args:
            speech: Speech sequences. (B, S)
            speech_lengths: Speech sequences lengths. (B,)

        Return:
            feats: Features sequences. (B, T, D_feats)
            feats_lengths: Features sequences lengths. (B,)

        """
        assert speech_lengths.dim() == 1, speech_lengths.shape

        # for data-parallel
        speech = speech[:, : speech_lengths.max()]

        if self.frontend is not None:
            feats, feats_lengths = self.frontend(speech, speech_lengths)
        else:
            feats, feats_lengths = speech, speech_lengths

        return feats, feats_lengths

    def _calc_transducer_loss(
        self,
        encoder_out: torch.Tensor,
        joint_out: torch.Tensor,
        target: torch.Tensor,
        t_len: torch.Tensor,
        u_len: torch.Tensor,
    ) -> torch.Tensor:
        """Compute Transducer loss with torchaudio's RNNTLoss.

        Args:
            encoder_out: Encoder output sequences. (B, T, D_enc). Unused; kept
                         for interface compatibility.
            joint_out: Joint Network output sequences (B, T, U, D_joint)
            target: Target label ID sequences. (B, L)
            t_len: Encoder output sequences lengths. (B,)
            u_len: Target label ID sequences lengths. (B,)

        Return:
            loss_transducer: Transducer loss value.

        """
        if self.criterion_transducer is None:
            try:
                from torchaudio.transforms import RNNTLoss

                # Fix: torchaudio's RNNTLoss accepts (blank, clamp, reduction);
                # the previous kwargs (size_average, blank_label,
                # fastemit_lambda) belong to warp-rnnt and raised a TypeError.
                # FastEmit regularization is therefore not applied here.
                self.criterion_transducer = RNNTLoss(
                    blank=self.blank_id,
                    clamp=self.clamp,
                    reduction="mean",
                )
            except ImportError:
                logging.error(
                    "torchaudio was not installed. "
                    "Please consult the installation documentation."
                )
                exit(1)

        # Loss is computed in fp32 regardless of autocast state.
        with autocast(device_type="cuda", enabled=False):
            loss_transducer = self.criterion_transducer(
                joint_out.float(),
                target,
                t_len,
                u_len,
            )

        return loss_transducer

    def _calc_k2_transducer_pruned_loss(
        self,
        encoder_out: torch.Tensor,
        decoder_out: torch.Tensor,
        labels: torch.Tensor,
        encoder_out_len: torch.Tensor,
        decoder_out_len: torch.Tensor,
        joint_network: JointNetwork,
        prune_range: int = 5,
        simple_loss_scaling: float = 0.5,
        lm_scale: float = 0.0,
        am_scale: float = 0.0,
        loss_type: str = "regular",
        reduction: str = "mean",
        padding_idx: int = 0,
    ) -> torch.Tensor:
        """Compute k2 pruned Transducer loss.

        Two-pass computation: a cheap "simple" loss over linear AM/LM
        projections yields gradients that bound the lattice region, then the
        full joint network is evaluated only inside that pruned region.

        Args:
            encoder_out: Encoder output sequences. (B, T, D_enc)
            decoder_out: Decoder output sequences. (B, T, D_dec)
            labels: Label ID sequences. (B, L)
            encoder_out_len: Encoder output sequences lengths. (B,)
            decoder_out_len: Target label ID sequences lengths. (B,)
            joint_network: Joint Network module of the prediction head this
                           loss is computed for.
            prune_range: How many tokens by frame are used compute the pruned loss.
            simple_loss_scaling: The weight to scale the simple loss after warm-up.
            lm_scale: The scale factor to smooth the LM part.
            am_scale: The scale factor to smooth the AM part.
            loss_type: Define the type of path to take for loss computation.
                         (Either 'regular', 'smoothed' or 'constrained')
            reduction: Reduction mode passed to the k2 loss functions.
            padding_idx: SOS/EOS + Padding index.

        Return:
            loss_transducer: Transducer loss value.

        """
        try:
            import k2

            # FastEmit only applies to the warp-rnnt path; warn if configured.
            if self.fastemit_lambda > 0.0:
                logging.info(
                    "Disabling FastEmit, it is not available with k2 Transducer loss. "
                    "Please see delay_penalty option instead."
                )
        except ImportError:
            logging.error(
                "k2 was not installed. Please consult the installation documentation."
            )
            exit(1)

        # Note (b-flo): We use a dummy scaling scheme until the training parts are
        # revised (in a short future).
        # NOTE(review): steps_num is mutated here, so calling this method several
        # times per batch (once per prediction head) advances the warm-up faster
        # than once per optimizer step — confirm this is intended.
        self.steps_num += 1

        # Warm-up: ramp the pruned loss in (0.1 -> 1.0) while ramping the simple
        # loss down (1.0 -> simple_loss_scaling).
        if self.steps_num < self.warmup_steps:
            pruned_loss_scaling = 0.1 + 0.9 * (self.steps_num / self.warmup_steps)
            simple_loss_scaling = 1.0 - (
                (self.steps_num / self.warmup_steps) * (1.0 - simple_loss_scaling)
            )
        else:
            pruned_loss_scaling = 1.0

        # Drop padding (ignore_id) before building the ragged label tensor.
        labels_unpad = [y[y != self.ignore_id].tolist() for y in labels]

        target = k2.RaggedTensor(labels_unpad).to(decoder_out.device)
        target_padded = target.pad(mode="constant", padding_value=padding_idx)
        target_padded = target_padded.to(torch.int64)

        # Per-utterance boundaries: columns 2/3 hold label and frame lengths
        # (begin indices stay 0), as expected by the k2 loss functions.
        boundary = torch.zeros(
            (encoder_out.size(0), 4),
            dtype=torch.int64,
            device=encoder_out.device,
        )
        boundary[:, 2] = decoder_out_len
        boundary[:, 3] = encoder_out_len

        lm = self.lm_proj(decoder_out)
        am = self.am_proj(encoder_out)

        # First pass: simple loss; its gradients define the pruning bounds.
        with autocast(device_type="cuda", enabled=False):
            simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed(
                lm.float(),
                am.float(),
                target_padded,
                padding_idx,
                lm_only_scale=lm_scale,
                am_only_scale=am_scale,
                boundary=boundary,
                rnnt_type=loss_type,
                reduction=reduction,
                return_grad=True,
            )

        ranges = k2.get_rnnt_prune_ranges(
            px_grad,
            py_grad,
            boundary,
            prune_range,
        )

        # Second pass: evaluate the joint network only inside the pruned ranges.
        am_pruned, lm_pruned = k2.do_rnnt_pruning(
            joint_network.lin_enc(encoder_out),
            joint_network.lin_dec(decoder_out),
            ranges,
        )

        joint_out = joint_network(am_pruned, lm_pruned, no_projection=True)

        with autocast(device_type="cuda", enabled=False):
            pruned_loss = k2.rnnt_loss_pruned(
                joint_out.float(),
                target_padded,
                ranges,
                padding_idx,
                boundary,
                rnnt_type=loss_type,
                reduction=reduction,
            )

        loss_transducer = (
            simple_loss_scaling * simple_loss + pruned_loss_scaling * pruned_loss
        )

        return loss_transducer

    def _calc_ctc_loss(
        self,
        encoder_out: torch.Tensor,
        target: torch.Tensor,
        t_len: torch.Tensor,
        u_len: torch.Tensor,
    ) -> torch.Tensor:
        """Compute CTC loss.

        Args:
            encoder_out: Encoder output sequences. (B, T, D_enc)
            target: Target label ID sequences. (B, L)
            t_len: Encoder output sequences lengths. (B,)
            u_len: Target label ID sequences lengths. (B,)

        Return:
            loss_ctc: CTC loss value.

        """
        ctc_in = self.ctc_lin(
            torch.nn.functional.dropout(encoder_out, p=self.ctc_dropout_rate)
        )
        ctc_in = torch.log_softmax(ctc_in.transpose(0, 1), dim=-1)

        target_mask = target != 0
        ctc_target = target[target_mask].cpu()

        with torch.backends.cudnn.flags(deterministic=True):
            try:
                loss_ctc = torch.nn.functional.ctc_loss(
                    ctc_in,
                    ctc_target,
                    t_len,
                    u_len,
                    zero_infinity=True,
                    reduction="sum",
                )
            except Exception as e:
                logging.info(f"Exception occurred: {e}")
                sys.exit(0)
        loss_ctc /= target.size(0)

        return loss_ctc

    def _calc_lm_loss(
        self,
        decoder_out: torch.Tensor,
        target: torch.Tensor,
    ) -> torch.Tensor:
        """Compute LM loss (i.e.: Cross-entropy with smoothing).

        Args:
            decoder_out: Decoder output sequences. (B, U, D_dec)
            target: Target label ID sequences. (B, L)

        Return:
            loss_lm: LM loss value.

        """
        batch_size = decoder_out.size(0)

        logp = torch.log_softmax(
            self.lm_lin(decoder_out[:, :-1, :]).view(-1, self.vocab_size),
            dim=1,
        )
        target = target.view(-1).type(torch.int64)
        ignore = (target == 0).unsqueeze(1)

        with torch.no_grad():
            true_dist = logp.clone().fill_(self.lm_loss_smooth_neg)

            true_dist.scatter_(1, target.unsqueeze(1), self.lm_loss_smooth_pos)

        loss_lm = torch.nn.functional.kl_div(logp, true_dist, reduction="none")
        loss_lm = loss_lm.masked_fill(ignore, 0).sum() / batch_size

        return loss_lm

    # TODO(review): dead code — training-time PER computation was started but
    # never finished; implement it or delete this stub before release.
    # def _calc_per_train(
    #     self,
    #     joint_out: torch.Tensor,
    #     target: torch.Tensor,
    # ) -> float:
    #     batch_size = joint_out.size(0)
    #     enc_len = joint_out.size(1)
    #     # for batch_idx in range(batch_size):
    #     #     sample_jt = joint_out[batch_idx, :, :-1, :].squeeze(0)
    #     #     for t in range(enc_len):
    #     pass

