# Copyright (c) 2025 Wenet Community. authors: Mddct(Dinghao Zhou)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional, Tuple, Dict

import torch
from wenet.transformer.asr_model import ASRModel
from wenet.transformer.ctc import CTC
from wenet.transformer.decoder import TransformerDecoder
from wenet.transformer.encoder import ConformerEncoder
from wenet.utils.common import IGNORE_ID
from torch import nn

class CaptionHead(nn.Module):
    """Attention-pooling classification head over encoder frames.

    A single learned attention query scores every frame; the softmax of the
    scores (restricted to valid frames) pools the sequence into one vector,
    which a linear layer maps to ``output_size`` class logits.
    """

    def __init__(self, input_size: int, output_size: int = 8):
        super().__init__()
        # Per-frame scalar score used as the attention logit.
        self.attn = nn.Linear(input_size, 1)
        self.classifier = nn.Linear(input_size, output_size)
        self.criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    def _logits(self, x: torch.Tensor, x_lens: torch.Tensor) -> torch.Tensor:
        """Attention-pool ``x`` over its valid frames and classify.

        Args:
            x: encoder output of shape (B, T, D).
            x_lens: valid frame count per utterance, shape (B,).

        Returns:
            Unnormalized class logits of shape (B, output_size).
        """
        _, T, _ = x.size()
        attn_scores = self.attn(x).squeeze(-1)  # (B, T)
        # Build the padding mask: positions >= x_lens are invalid and get
        # -inf so their softmax weight is exactly zero.
        mask = torch.arange(T, device=x.device).unsqueeze(0) >= x_lens.unsqueeze(1)  # (B, T), bool
        attn_scores = attn_scores.masked_fill(mask, float('-inf'))

        attn_weights = torch.softmax(attn_scores, dim=1).unsqueeze(-1)  # (B, T, 1)
        x_weighted = (x * attn_weights).sum(dim=1)  # (B, D)
        return self.classifier(x_weighted)  # (B, output_size)

    def forward(self, x: torch.Tensor, x_lens: torch.Tensor,
                target: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute the label-smoothed cross-entropy loss.

        Args:
            x: encoder output of shape (B, T, D).
            x_lens: valid frame count per utterance, shape (B,).
            target: class index per utterance, shape (B,).

        Returns:
            (loss, logits) where logits has shape (B, output_size).
        """
        out = self._logits(x, x_lens)
        loss = self.criterion(out, target)
        return loss, out

    def generate(self, x: torch.Tensor, x_lens: torch.Tensor,
                 target: Optional[torch.Tensor] = None):
        """Predict one class per utterance, optionally scored against refs.

        Returns:
            A list of dicts, one per utterance, with keys "ref", "hyp" and
            "if_same"; "ref" and "if_same" are None when no target is given.
        """
        out = self._logits(x, x_lens)
        hyp = torch.argmax(out, dim=1)  # (B,)
        if target is not None:
            return [{
                "ref": ref_val,
                "hyp": hyp_val,
                "if_same": ref_val == hyp_val,
            } for ref_val, hyp_val in zip(target.tolist(), hyp.tolist())]
        return [{
            "ref": None,
            "hyp": hyp_val,
            "if_same": None,
        } for hyp_val in hyp.tolist()]


class TransformerCaptionHead(nn.Module):
    """Caption classifier that pools a frame sequence via a [CLS] token.

    A learnable classification token is prepended to the input frames, the
    whole sequence runs through a small Transformer encoder, and the final
    state of the token position is projected to class logits.
    """

    def __init__(self, input_size: int, output_size: int = 8, num_layers: int = 2, num_heads: int = 4):
        super().__init__()
        layer = nn.TransformerEncoderLayer(d_model=input_size, nhead=num_heads, batch_first=True)
        self.transformer = nn.TransformerEncoder(layer, num_layers=num_layers)
        # Learnable [CLS] embedding, broadcast over the batch in forward().
        self.cls_token = nn.Parameter(torch.randn(1, 1, input_size))
        self.classifier = nn.Linear(input_size, output_size)

    def forward(self, x):
        """Map frames of shape (B, T, D) to logits of shape (B, output_size)."""
        batch = x.size(0)
        tokens = self.cls_token.expand(batch, -1, -1)  # one [CLS] per item
        encoded = self.transformer(torch.cat([tokens, x], dim=1))  # (B, T+1, D)
        # Classify from the [CLS] position only; frame states are discarded.
        return self.classifier(encoded[:, 0])
from gxl_ai_utils.utils import utils_file

class FireReadModel(ASRModel):
    """ASR model extended with an attention-pooling caption classifier.

    Adds a ``CaptionHead`` on top of the encoder output; the per-batch task
    string in ``batch['tasks']`` selects between caption classification and
    regular CTC/attention ASR training.
    """

    def __init__(
        self,
        vocab_size: int,
        encoder: ConformerEncoder,
        decoder: TransformerDecoder,
        ctc: Optional[CTC] = None,
        ctc_weight: float = 0.5,
        ignore_id: int = IGNORE_ID,
        reverse_weight: float = 0.0,
        lsm_weight: float = 0.0,
        length_normalized_loss: bool = False,
        special_tokens: Optional[dict] = None,
    ):
        """Build the model.

        Arguments mirror ``ASRModel``; additionally ``special_tokens`` is
        required and must contain "sos" and "eos" ids, and right-to-left
        (reverse) decoding is not supported (``reverse_weight`` must be 0).
        """
        super().__init__(vocab_size, encoder, decoder, ctc, ctc_weight,
                         ignore_id, reverse_weight, lsm_weight,
                         length_normalized_loss, special_tokens)
        assert reverse_weight == 0.0
        assert special_tokens is not None
        self.sos = special_tokens["sos"]
        self.eos = special_tokens["eos"]
        # Maximum decode length taken from the decoder's positional
        # embedding module (assumes decoder.embed[1] exposes max_len).
        self.decode_maxlen = self.decoder.embed[1].max_len
        # 8-way classification head over the encoder output.
        self.caption_head = CaptionHead(self.encoder.output_size(), 8)
        print(f'sos: {self.sos}, eos: {self.eos}, decode_maxlen: {self.decode_maxlen}')

    @torch.jit.unused
    def forward_encoder_chunk(
        self,
        xs: torch.Tensor,
        offset: int,
        required_cache_size: int,
        att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
        cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # Streaming (chunk-wise) encoding is deliberately unsupported.
        raise NotImplementedError('FiredASR don\'t support streaming')

    @torch.jit.unused
    def forward(
        self,
        batch: dict,
        device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        """Frontend + Encoder + Decoder + Calc loss.

        Dispatches on batch['tasks'] (must be uniform across the batch):
        "<TRANSCRIBE> <CAPTION>" trains only the caption head;
        "<TRANSCRIBE>" trains the standard CTC + attention branches.
        Returns a dict of losses (and attention accuracy for ASR).
        """
        speech = batch['feats'].to(device)
        speech_lengths = batch['feats_lengths'].to(device)

        # 1. Encoder
        encoder_out, encoder_mask = self.encoder(speech, speech_lengths)
        encoder_out_lens = encoder_mask.squeeze(1).sum(1)

        task_batch = batch['tasks']
        # Assert that all entries of the task list are identical.
        assert len(set(task_batch)) == 1
        task = task_batch[0]
        utils_file.logging_limit_print(f"task_name: {task}")

        if task == "<TRANSCRIBE> <CAPTION>":
            """"""
            # Caption classification branch: caption_ids are the class
            # targets; caption_tags are presumably human-readable labels
            # used only for logging — TODO confirm against the dataset code.
            caption_ids = batch['caption_ids'].to(device)
            caption_tags = batch['caption_tags']
            caption_loss,_ = self.caption_head(encoder_out, encoder_out_lens, caption_ids)
            utils_file.logging_limit_print(f"进行caption分类，caption_tags: {caption_tags}, caption_ids: {caption_ids}, caption_loss: {caption_loss}")
            return {
                "loss": caption_loss,
                "loss_caption": caption_loss,
            }

        elif task == "<TRANSCRIBE>":
            """"""
            text = batch['target'].to(device)
            text_lengths = batch['target_lengths'].to(device)

            assert text_lengths.dim() == 1, text_lengths.shape
            # Check that batch_size is unified
            assert (speech.shape[0] == speech_lengths.shape[0] == text.shape[0] ==
                    text_lengths.shape[0]), (speech.shape, speech_lengths.shape,
                                             text.shape, text_lengths.shape)
            # 2a. CTC branch
            if self.ctc_weight != 0.0:
                loss_ctc, ctc_probs = self.ctc(encoder_out, encoder_out_lens, text,
                                               text_lengths)
            else:
                loss_ctc, ctc_probs = None, None

            # 2b. Attention-decoder branch
            # use non blank (token level) embedding for decoder
            if self.apply_non_blank_embedding:
                assert self.ctc_weight != 0
                assert ctc_probs is not None
                encoder_out, encoder_mask = self.filter_blank_embedding(
                    ctc_probs, encoder_out)
            if self.ctc_weight != 1.0:
                loss_att, acc_att = self._calc_att_loss(
                    encoder_out, encoder_mask, text, text_lengths,)
            else:
                loss_att = None
                acc_att = None

            # Combine the branch losses; either branch may be disabled by
            # ctc_weight being exactly 0.0 or 1.0.
            if loss_ctc is None:
                loss = loss_att
            elif loss_att is None:
                loss = loss_ctc
            else:
                loss = self.ctc_weight * loss_ctc + (1 -
                                                     self.ctc_weight) * loss_att
            return {
                "loss": loss,
                "loss_att": loss_att,
                "loss_ctc": loss_ctc,
                "th_accuracy": acc_att,
            }
