from dataclasses import dataclass, field, replace
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.distributions import Categorical

from .audio import CHUNK_LENGTH
from .tokenizer import Tokenizer, get_tokenizer
from .utils import compression_ratio

if TYPE_CHECKING:
    from .model import Whisper


@torch.no_grad()
def detect_language(
        model: "Whisper", mel: Tensor, tokenizer: Tokenizer = None
) -> Tuple[Tensor, List[dict]]:
    """
    对于指定的语音, 对其进行识别, 开头的符号就是语音_char的概率, 先屏蔽掉所有的non-language tokens,在求出最大的language tokens
    Detect the spoken language in the audio, and return them as list of strings, along with the ids
    of the most probable language tokens and the probability distribution over all language tokens.
    This is performed outside the main decode loop in order to not interfere with kv-caching.

    language_tokens : Tensor, shape = (n_audio,)
        ids of the most probable language tokens, which appears after the startoftranscript token.
    language_probs : List[Dict[str, float]], length = n_audio
        list of dictionaries containing the probability distribution over all languages.
    """
    if tokenizer is None:
        tokenizer = get_tokenizer(model.is_multilingual)
    if (
            tokenizer.language is None  # 代表单语言模式
            or tokenizer.language_token not in tokenizer.sot_sequence
            # 在tokenizer中可以看到,tokenizer的language参数是放入到了sot_seq中的
    ):
        raise ValueError(
            "This model doesn't have language tokens so it can't perform lang id"
        )

    single = mel.ndim == 2
    if single:
        mel = mel.unsqueeze(0)

    # skip encoder forward pass if already-encoded audio features were given
    if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):
        mel = model.encoder(mel)

    # forward pass using a single token, startoftranscript
    n_audio = mel.shape[0]
    x = torch.tensor([[tokenizer.sot]] * n_audio).to(mel.device)  # [n_audio, 1]
    logits = model.logits(x, mel)[:, 0]  # (n_audio, text_lens , vocab_size)
    """
    使用sot单个token作为上文进行一次前向传播,最后得到的是和text_input相同长度的logits输出,也就正是本次time节点的概率分布
    """

    # collect detected languages; suppress all non-language tokens
    mask = torch.ones(logits.shape[-1], dtype=torch.bool)
    mask[list(tokenizer.all_language_tokens)] = False
    logits[:, mask] = -np.inf
    """
    压制所有非语言种类的token
    """

    language_tokens = logits.argmax(dim=-1)
    """
    得到最大概率的语种token_id, shape: (n_audio/batch_size,)
    """

    language_token_probs = logits.softmax(dim=-1).cpu()
    language_probs = [
        {
            c: language_token_probs[i, j].item()
            for j, c in zip(tokenizer.all_language_tokens, tokenizer.all_language_codes)
        }
        for i in range(n_audio)
    ]
    """
    得到一个language_probs这么一个dic_list, 每个list元素为一个字典,表示该batch_index的内容的语种概率分布,key为语种简称,value为概率
    """

    if single:
        language_tokens = language_tokens[0]
        language_probs = language_probs[0]

    return language_tokens, language_probs


@dataclass(frozen=True)
class DecodingOptions:
    """
    Options controlling a single decoding run.
    """
    # whether to perform X->X "transcribe" or X->English "translate";
    # "lang_id" (language identification only) is also accepted
    task: str = "transcribe"

    # language that the audio is in; uses detected language if None
    language: Optional[str] = None

    # sampling-related options
    temperature: float = 0.0
    sample_len: Optional[int] = None  # maximum number of tokens to sample
    best_of: Optional[int] = None  # number of independent sample trajectories, if t > 0
    beam_size: Optional[int] = None  # number of beams in beam search, if t == 0
    patience: Optional[float] = None  # patience in beam search (arxiv:2204.05424)

    # "alpha" in Google NMT, or None for length norm, when ranking generations
    # to select which to return among the beams or best-of-N samples
    length_penalty: Optional[float] = None

    # text or tokens to feed as the prompt or the prefix; for more info:
    # https://github.com/openai/whisper/discussions/117#discussioncomment-3727051
    prompt: Optional[Union[str, List[int]]] = None  # for the previous context
    prefix: Optional[Union[str, List[int]]] = None  # to prefix the current context

    # list of tokens ids (or comma-separated token ids) to suppress
    # "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()`
    suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1"
    suppress_blank: bool = True  # this will suppress blank outputs

    # timestamp sampling options
    without_timestamps: bool = False  # use <|notimestamps|> to sample text tokens only
    max_initial_timestamp: Optional[float] = 1.0

    # implementation details
    fp16: bool = True  # use fp16 for most of the calculation


@dataclass(frozen=True)
class DecodingResult:
    """
    Container for the result of decoding a single audio segment.
    """
    audio_features: Tensor
    language: str
    language_probs: Optional[Dict[str, float]] = None
    tokens: List[int] = field(default_factory=list)
    text: str = ""
    avg_logprob: float = np.nan
    no_speech_prob: float = np.nan
    temperature: float = np.nan
    compression_ratio: float = np.nan


class Inference:
    """
    Forward-pass abstraction: given the context tokens and audio features,
    produce the logits (vocabulary distribution) for the current step.
    """

    def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor:
        """
        Perform a forward pass on the decoder and return per-token logits
        tokens: (n_batch, n_tokens)
        audio_features: (n_batch, n_audio, n_features)
        return:
        logits: distribution over the vocabulary at the current step
        """
        raise NotImplementedError

    def rearrange_kv_cache(self, source_indices) -> None:
        """Update the key-value cache according to the updated beams"""
        raise NotImplementedError

    def cleanup_caching(self) -> None:
        """Clean up any resources or hooks after decoding is finished"""
        pass


class PyTorchInference(Inference):
    def __init__(self, model: "Whisper", initial_token_length: int):
        self.model: "Whisper" = model
        self.initial_token_length = initial_token_length
        self.kv_cache = {}
        self.hooks = []
        """
        kv_modules 全是self_attention的模块
        """
        key_modules = [block.attn.key for block in self.model.decoder.blocks]
        value_modules = [block.attn.value for block in self.model.decoder.blocks]
        self.kv_modules = key_modules + value_modules

    def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor:
        if not self.kv_cache:
            self.kv_cache, self.hooks = self.model.install_kv_cache_hooks()

        if tokens.shape[-1] > self.initial_token_length:
            # only need to use the last token except in the first forward pass
            tokens = tokens[:, -1:]
        """
        kv_cache只是在token的self-attention的时候才起作用, 其他的时候kv的固定的audio_feature, 只能说是
        锦上添花,附带没伤害罢了. kv_cache的关键作用是在每次只传入一个token上文的时候, 还能得到完整的上文信息
        =============>?????
        因为在self-attention阶段只能存储kv_cache, 而不能使用, 但是kv_cache本省就是为了在self-attention中
        使用呀, 所以这点还不是很明白
        =============> answer
        在钩子函数中是有返回值的, 这也就意味着钩子直接修改了key_linear value_linear函数的输出, 它将拼接好的
        内容丰富的k v 直接输出来用在self-attention中
        """
        return self.model.decoder(tokens, audio_features, kv_cache=self.kv_cache)

    def cleanup_caching(self):
        for hook in self.hooks:
            hook.remove()

        self.kv_cache = {}
        self.hooks = []

    def rearrange_kv_cache(self, source_indices):
        if source_indices != list(range(len(source_indices))):
            for module in self.kv_modules:
                # update the key/value cache to contain the selected sequences
                self.kv_cache[module] = self.kv_cache[module][source_indices].detach()


class SequenceRanker:
    """
    Strategy interface: rank groups of candidate sequences and pick one winner per group.
    """

    def rank(
            self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]]
    ) -> List[int]:
        """
        Given a list of groups of samples and their cumulative log probabilities,
        return the indices of the samples in each group to select as the final result
        """
        raise NotImplementedError


class MaximumLikelihoodRanker(SequenceRanker):
    """
    Select the sample with the highest log probabilities, penalized using either
    a simple length normalization or Google NMT paper's length penalty.

    Inputs are batch-shaped: `tokens` is (batch, n_candidates, sequence) and
    `sum_logprobs` is (batch, n_candidates); the winner index per batch item is
    returned.
    """

    def __init__(self, length_penalty: Optional[float]):
        self.length_penalty = length_penalty

    def rank(self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]]):
        """
        Return, for every group, the index of the candidate with the best
        length-normalized cumulative log probability.
        """
        alpha = self.length_penalty

        def normalized(logprob, length):
            # Without a penalty, divide by raw length; otherwise use the
            # Google NMT formula ((5 + length) / 6) ** alpha, which counters
            # the bias toward short sequences (log-probs only ever decrease
            # as tokens are appended).
            divisor = length if alpha is None else ((5 + length) / 6) ** alpha
            return logprob / divisor

        winners = []
        for group_logprobs, group_tokens in zip(sum_logprobs, tokens):
            scores = [
                normalized(lp, len(seq))
                for lp, seq in zip(group_logprobs, group_tokens)
            ]
            winners.append(np.argmax(scores))
        return winners


class TokenDecoder:
    """Strategy interface for choosing the next token at each decoding step."""

    def reset(self):
        """Initialize any stateful variables for decoding a new sequence"""

    def update(
            self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor
    ) -> Tuple[Tensor, bool]:
        """
        Specify how to select the next token, based on the current trace and logits

        Parameters
        ----------
        tokens : Tensor, shape = (n_batch, current_sequence_length)
            all tokens in the context so far, including the prefix and sot_sequence tokens

        logits : Tensor, shape = (n_batch, vocab_size)
            per-token logits of the probability distribution at the current step

        sum_logprobs : Tensor, shape = (n_batch)
            cumulative log probabilities for each sequence

        Returns
        -------
        tokens : Tensor, shape = (n_batch, current_sequence_length + 1)
            the tokens, appended with the selected next token

        completed : bool
            True if all sequences has reached the end of text

        """
        raise NotImplementedError

    def finalize(
            self, tokens: Tensor, sum_logprobs: Tensor
    ) -> Tuple[Sequence[Sequence[Tensor]], List[List[float]]]:
        """Finalize search and return the final candidate sequences

        Parameters
        ----------
        tokens : Tensor, shape = (n_audio, n_group, current_sequence_length)
            all tokens in the context so far, including the prefix and sot_sequence

        sum_logprobs : Tensor, shape = (n_audio, n_group)
            cumulative log probabilities for each sequence

        Returns
        -------
        tokens : Sequence[Sequence[Tensor]], length = n_audio
            sequence of Tensors containing candidate token sequences, for each audio input

        sum_logprobs : List[List[float]], length = n_audio
            sequence of cumulative log probabilities corresponding to the above

        """
        raise NotImplementedError


class GreedyDecoder(TokenDecoder):
    """
    Greedy / temperature sampling.

    With temperature == 0 the argmax token is taken at every step; otherwise the
    next token is drawn from the temperature-scaled categorical distribution.
    Sequences that have already produced EOT keep emitting EOT and stop
    accumulating log probability.
    """

    def __init__(self, temperature: float, eot: int):
        self.temperature = temperature
        self.eot = eot

    def update(
            self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor
    ) -> Tuple[Tensor, bool]:
        """
        Append one token to every sequence.

        tokens: (n_batch, current_length) context so far
        logits: (n_batch, vocab_size) distribution for the current step
        sum_logprobs: (n_batch,) cumulative log probabilities, updated in place
        Returns the extended tokens and whether every sequence has ended.
        """
        temperature = self.temperature
        if temperature == 0:
            next_tokens = logits.argmax(dim=-1)
        else:
            next_tokens = Categorical(logits=logits / temperature).sample()

        logprobs = F.log_softmax(logits.float(), dim=-1)
        chosen_logprobs = logprobs[torch.arange(logprobs.shape[0]), next_tokens]

        # only sequences that have not yet produced EOT accumulate probability
        still_running = tokens[:, -1] != self.eot
        sum_logprobs += chosen_logprobs * still_running

        # finished sequences are pinned to EOT forever
        next_tokens[~still_running] = self.eot
        tokens = torch.cat([tokens, next_tokens[:, None]], dim=-1)

        completed = (tokens[:, -1] == self.eot).all()
        return tokens, completed

    def finalize(self, tokens: Tensor, sum_logprobs: Tensor):
        """
        Guarantee a trailing EOT on every sequence and convert the cumulative
        log probabilities to plain Python floats.
        """
        padded = F.pad(tokens, (0, 1), value=self.eot)
        return padded, sum_logprobs.tolist()


class BeamSearchDecoder(TokenDecoder):
    """
    Beam search with optional patience (arxiv:2204.05424): decoding continues
    until `round(beam_size * patience)` finished candidates are collected for
    every audio input.
    """

    def __init__(
            self,
            beam_size: int,
            eot: int,
            inference: Inference,
            patience: Optional[float] = None,
    ):
        self.beam_size = beam_size
        self.eot = eot
        self.inference = inference
        self.patience = patience or 1.0
        self.max_candidates: int = round(beam_size * self.patience)
        self.finished_sequences = None

        assert (
                self.max_candidates > 0
        ), f"Invalid beam size ({beam_size}) or patience ({patience})"

    def reset(self):
        # forget finished candidates from any previous decoding run
        self.finished_sequences = None

    def update(
            self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor
    ) -> Tuple[Tensor, bool]:
        """
        Expand every beam by one token.

        `tokens` is laid out as (n_audio * beam_size, length); `sum_logprobs` is
        overwritten in place with the scores of the surviving beams. Returns the
        new beams and whether every audio input has collected enough finished
        candidates.
        """
        if tokens.shape[0] % self.beam_size != 0:
            raise ValueError(f"{tokens.shape}[0] % {self.beam_size} != 0")

        n_audio = tokens.shape[0] // self.beam_size
        if self.finished_sequences is None:  # for the first update
            self.finished_sequences = [{} for _ in range(n_audio)]

        logprobs = F.log_softmax(logits.float(), dim=-1)
        next_tokens, source_indices, finished_sequences = [], [], []
        for i in range(n_audio):
            scores, sources, finished = {}, {}, {}

            # STEP 1: calculate the cumulative log probabilities for possible candidates
            for j in range(self.beam_size):
                idx = i * self.beam_size + j
                prefix = tokens[idx].tolist()
                for logprob, token in zip(*logprobs[idx].topk(self.beam_size + 1)):
                    new_logprob = (sum_logprobs[idx] + logprob).item()
                    sequence = tuple(prefix + [token.item()])
                    scores[sequence] = new_logprob
                    sources[sequence] = idx

            # STEP 2: rank the candidates and keep the top beam_size sequences for each audio
            saved = 0
            for sequence in sorted(scores, key=scores.get, reverse=True):
                if sequence[-1] == self.eot:
                    finished[sequence] = scores[sequence]
                else:
                    sum_logprobs[len(next_tokens)] = scores[sequence]
                    next_tokens.append(sequence)
                    source_indices.append(sources[sequence])

                    saved += 1
                    if saved == self.beam_size:
                        break

            finished_sequences.append(finished)

        tokens = torch.tensor(next_tokens, device=tokens.device)
        self.inference.rearrange_kv_cache(source_indices)

        # add newly finished sequences to self.finished_sequences
        assert len(self.finished_sequences) == len(finished_sequences)
        for previously_finished, newly_finished in zip(
                self.finished_sequences, finished_sequences
        ):
            for seq in sorted(newly_finished, key=newly_finished.get, reverse=True):
                if len(previously_finished) >= self.max_candidates:
                    break  # the candidate list is full
                previously_finished[seq] = newly_finished[seq]

        # mark as completed if all audio has enough number of samples
        completed = all(
            len(sequences) >= self.max_candidates
            for sequences in self.finished_sequences
        )
        return tokens, completed

    def finalize(self, preceding_tokens: Tensor, sum_logprobs: Tensor):
        """
        Return the finished candidate sequences (token lists and scores) for each
        audio input, topping up with the best unfinished beams whenever fewer than
        `beam_size` sequences reached EOT.
        """
        # collect all finished sequences, including patience, and add unfinished ones if not enough
        sum_logprobs = sum_logprobs.cpu()
        for i, sequences in enumerate(self.finished_sequences):
            if (
                    len(sequences) < self.beam_size
            ):  # when not enough sequences are finished
                for j in list(np.argsort(sum_logprobs[i]))[::-1]:
                    sequence = preceding_tokens[i, j].tolist() + [self.eot]
                    sequences[tuple(sequence)] = sum_logprobs[i][j].item()
                    if len(sequences) >= self.beam_size:
                        break

        tokens: List[List[Tensor]] = [
            [torch.tensor(seq) for seq in sequences.keys()]
            for sequences in self.finished_sequences
        ]
        sum_logprobs: List[List[float]] = [
            list(sequences.values()) for sequences in self.finished_sequences
        ]
        return tokens, sum_logprobs


class LogitFilter:
    """Strategy interface for masking/penalizing logits in place before sampling."""

    def apply(self, logits: Tensor, tokens: Tensor) -> None:
        """Apply any filtering or masking to logits in-place

        Parameters
        ----------
        logits : Tensor, shape = (n_batch, vocab_size)
            per-token logits of the probability distribution at the current step

        tokens : Tensor, shape = (n_batch, current_sequence_length)
            all tokens in the context so far, including the prefix and sot_sequence tokens

        """
        raise NotImplementedError


class SuppressBlank(LogitFilter):
    """Forbid a blank output (" " or EOT) as the very first sampled token."""

    def __init__(self, tokenizer: Tokenizer, sample_begin: int):
        self.tokenizer = tokenizer
        self.sample_begin = sample_begin

    def apply(self, logits: Tensor, tokens: Tensor):
        # only the first sampling position is constrained
        if tokens.shape[1] != self.sample_begin:
            return
        blank_ids = self.tokenizer.encode(" ") + [self.tokenizer.eot]
        logits[:, blank_ids] = -np.inf


class SuppressTokens(LogitFilter):
    """Unconditionally mask out a fixed set of token ids at every step."""

    def __init__(self, suppress_tokens: Sequence[int]):
        self.suppress_tokens = list(suppress_tokens)

    def apply(self, logits: Tensor, tokens: Tensor):
        # -inf logits get zero probability after softmax
        banned = self.suppress_tokens
        logits[:, banned] = -np.inf


class ApplyTimestampRules(LogitFilter):
    def __init__(
            self,
            tokenizer: Tokenizer,
            sample_begin: int,
            max_initial_timestamp_index: Optional[int],
    ):
        self.tokenizer = tokenizer
        self.sample_begin = sample_begin
        self.max_initial_timestamp_index = max_initial_timestamp_index

    def apply(self, logits: Tensor, tokens: Tensor):
        # suppress <|notimestamps|> which is handled by without_timestamps
        if self.tokenizer.no_timestamps is not None:
            logits[:, self.tokenizer.no_timestamps] = -np.inf

        # timestamps have to appear in pairs, except directly before EOT; mask logits accordingly
        for k in range(tokens.shape[0]):
            sampled_tokens = tokens[k, self.sample_begin:]
            seq = [t for t in sampled_tokens.tolist()]
            last_was_timestamp = (
                    len(seq) >= 1 and seq[-1] >= self.tokenizer.timestamp_begin
            )
            penultimate_was_timestamp = (
                    len(seq) < 2 or seq[-2] >= self.tokenizer.timestamp_begin
            )

            if last_was_timestamp:
                if penultimate_was_timestamp:  # has to be non-timestamp
                    logits[k, self.tokenizer.timestamp_begin:] = -np.inf
                else:  # cannot be normal text tokens
                    logits[k, : self.tokenizer.eot] = -np.inf
            """
            这里应用了一个规则： 如果倒数第一个是时间戳， 则判断： 如果倒数第二个还是时间戳（或者只有一个token）， 
            那么当前这个必须不是时间戳， 具体原因尚不得知，；如果倒数第二个不是时间戳，则断定当前时间点不会让语句
            结束， 必定不可能得到eot
            """

            timestamps = sampled_tokens[
                sampled_tokens.ge(self.tokenizer.timestamp_begin)
            ]
            if timestamps.numel() > 0:
                # timestamps shouldn't decrease; forbid timestamp tokens smaller than the last
                # also force each segment to have a nonzero length, to prevent infinite looping
                if last_was_timestamp and not penultimate_was_timestamp:
                    timestamp_last = timestamps[-1]
                else:
                    timestamp_last = timestamps[-1] + 1
                logits[k, self.tokenizer.timestamp_begin: timestamp_last] = -np.inf

        """
        也就是当前节点是第一个采样点， 第一个采样点必须是时间戳
        """
        if tokens.shape[1] == self.sample_begin:
            # suppress generating non-timestamp tokens at the beginning
            logits[:, : self.tokenizer.timestamp_begin] = -np.inf
            """
            设定初始采样点最大可以采到的时间戳索引
            """
            # apply the `max_initial_timestamp` option
            if self.max_initial_timestamp_index is not None:
                last_allowed = (
                        self.tokenizer.timestamp_begin + self.max_initial_timestamp_index
                )
                logits[:, last_allowed + 1:] = -np.inf

        """
        如果时间戳上的概率综合大于任意非时间戳token的概率， 则只采样时间戳
        """
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = F.log_softmax(logits.float(), dim=-1)
        for k in range(tokens.shape[0]):
            timestamp_logprob = logprobs[k, self.tokenizer.timestamp_begin:].logsumexp(
                dim=-1
            )
            max_text_token_logprob = logprobs[k, : self.tokenizer.timestamp_begin].max()
            if timestamp_logprob > max_text_token_logprob:
                logits[k, : self.tokenizer.timestamp_begin] = -np.inf


class DecodingTask:
    """
    解码任务，
    """
    inference: Inference
    sequence_ranker: SequenceRanker
    decoder: TokenDecoder
    logit_filters: List[LogitFilter]

    def __init__(self, model: "Whisper", options: DecodingOptions):
        """
        Wire up the tokenizer, inference backend, sequence ranker, token decoder,
        and logit filters implied by `options` for decoding with `model`.
        """
        self.model = model

        language = options.language or "en"
        tokenizer = get_tokenizer(
            model.is_multilingual, language=language, task=options.task
        )
        self.tokenizer: Tokenizer = tokenizer
        self.options: DecodingOptions = self._verify_options(options)

        self.n_group: int = options.beam_size or options.best_of or 1

        self.n_ctx: int = model.dims.n_text_ctx
        """
        n_ctx refers to n_text_ctx, the decoder's text context length
        """

        self.sample_len: int = options.sample_len or model.dims.n_text_ctx // 2
        """
        Maximum number of tokens to sample. Defaults to n_text_ctx // 2; the other
        half of the text context is reserved for the prompt/prefix (see
        _get_initial_tokens). An explicit options.sample_len overrides the default.
        """

        self.sot_sequence: Tuple[int] = tokenizer.sot_sequence

        if self.options.without_timestamps:
            self.sot_sequence = tokenizer.sot_sequence_including_notimestamps
        """
        When timestamps are disabled, use the SOT sequence variant that carries
        the <|notimestamps|> marker.
        """

        self.initial_tokens: Tuple[int] = self._get_initial_tokens()
        self.sample_begin: int = len(self.initial_tokens)
        self.sot_index: int = self.initial_tokens.index(tokenizer.sot)
        """
        sot_index is the position of the SOT token inside initial_tokens
        (a prompt may precede it).
        """

        # inference: implements the forward pass through the decoder, including kv caching
        self.inference = PyTorchInference(model, len(self.initial_tokens))

        # sequence ranker: implements how to rank a group of sampled sequences
        self.sequence_ranker = MaximumLikelihoodRanker(options.length_penalty)

        # decoder: implements how to select the next tokens, given the autoregressive distribution
        if options.beam_size is not None:
            self.decoder = BeamSearchDecoder(
                options.beam_size, tokenizer.eot, self.inference, options.patience
            )
            """
            With beam_size set, beam search is used; otherwise greedy/sampling decoding.
            """
        else:
            self.decoder = GreedyDecoder(options.temperature, tokenizer.eot)
            """
            With temperature 0 the most probable token is always picked; a higher
            temperature introduces more randomness into the sampling.
            """

        # logit filters: applies various rules to suppress or penalize certain tokens
        self.logit_filters = []
        if self.options.suppress_blank:
            self.logit_filters.append(SuppressBlank(self.tokenizer, self.sample_begin))
        if self.options.suppress_tokens:
            self.logit_filters.append(SuppressTokens(self._get_suppress_tokens()))
        if not options.without_timestamps:
            precision = CHUNK_LENGTH / model.dims.n_audio_ctx  # usually 0.02 seconds
            """
            Seconds of audio per encoder position; with the usual 1500-position
            context over a 30 s chunk this is 20 ms (two 10 ms frames) — presumably,
            depends on CHUNK_LENGTH and the model dims.
            """

            max_initial_timestamp_index = None
            if options.max_initial_timestamp:
                max_initial_timestamp_index = round(
                    self.options.max_initial_timestamp / precision
                )
            self.logit_filters.append(
                ApplyTimestampRules(
                    tokenizer, self.sample_begin, max_initial_timestamp_index
                )
            )

    def _verify_options(self, options: DecodingOptions) -> DecodingOptions:
        if options.beam_size is not None and options.best_of is not None:
            raise ValueError("beam_size and best_of can't be given together")
        if options.temperature == 0:
            if options.best_of is not None:
                raise ValueError("best_of with greedy sampling (T=0) is not compatible")
        if options.patience is not None and options.beam_size is None:
            raise ValueError("patience requires beam_size to be given")
        """
        length_penalty只能在0-1之间, 也就是说此时综合公式的增长率小于正比函数, 也就是不充分的长度归一化,
        也就是说会抑制长结果的输出, "长度惩罚"也就名副其实了
        """
        if options.length_penalty is not None and not (
                0 <= options.length_penalty <= 1
        ):
            raise ValueError("length_penalty (alpha) should be a value between 0 and 1")

        return options

    def _get_initial_tokens(self) -> Tuple[int]:
        """
        向sot_seq中添加前缀和前缀提示词(prefix和prompt), prefix放在sot_seq后面, prompt则放在
        其前面
        """
        tokens = list(self.sot_sequence)

        if prefix := self.options.prefix:
            prefix_tokens = (
                self.tokenizer.encode(" " + prefix.strip())
                if isinstance(prefix, str)
                else prefix
            )
            if self.sample_len is not None:
                max_prefix_len = self.n_ctx // 2 - self.sample_len
                prefix_tokens = prefix_tokens[-max_prefix_len:]
            tokens = tokens + prefix_tokens
            """
            这里使用了self.n_ctx // 2 - self.sample_len , 也就表明了, 如果没有设置sample_len这个参数, 那么这个
            等式将为零, 就不能有前缀了, 所以这里要是想设置前缀, 必须指定sample_len,且必须小于n_text_ctx//2好为前缀留空间
            """

        if prompt := self.options.prompt:
            prompt_tokens = (
                self.tokenizer.encode(" " + prompt.strip())
                if isinstance(prompt, str)
                else prompt
            )
            tokens = (
                    [self.tokenizer.sot_prev]
                    + prompt_tokens[-(self.n_ctx // 2 - 1):]
                    + tokens
            )
        """
        ==============>????
        这里对于prompt的处理确实还不是很理解
        """

        return tuple(tokens)

    def _get_suppress_tokens(self) -> Tuple[int]:
        """
        得到要被镇压的token, 首先从options配置中得到镇压列表, 如果含有-1, 则将tokenizer中的非语音token加入镇压行列
        然后将sot_seq中的token无条件得加入镇压序列, 这些只是药引, 推理生成的时候肯定不能生成他们, 包括:transcribe, translate, sot_(sot, sot_prev, sot_lm)
        接着将no_speech标志加入其中, 该标志是为了得到语句非语音的概率, 会单独处理, 推理时不需要
        """
        suppress_tokens = self.options.suppress_tokens

        if isinstance(suppress_tokens, str):
            suppress_tokens = [int(t) for t in suppress_tokens.split(",")]

        if -1 in suppress_tokens:
            suppress_tokens = [t for t in suppress_tokens if t >= 0]
            suppress_tokens.extend(self.tokenizer.non_speech_tokens)
        elif suppress_tokens is None or len(suppress_tokens) == 0:
            suppress_tokens = []  # interpret empty string as an empty list
        else:
            assert isinstance(suppress_tokens, list), "suppress_tokens must be a list"

        suppress_tokens.extend(
            [
                self.tokenizer.transcribe,
                self.tokenizer.translate,
                self.tokenizer.sot,
                self.tokenizer.sot_prev,
                self.tokenizer.sot_lm,
            ]
        )
        if self.tokenizer.no_speech is not None:
            # no-speech probability is collected separately
            suppress_tokens.append(self.tokenizer.no_speech)
            """
            无语音的概率是单独采集的, 所以在常规推理中先将其镇压
            """

        return tuple(sorted(set(suppress_tokens)))

    def _get_audio_features(self, mel: Tensor):
        """
        通过mel数据转为音频特征
        """
        if self.options.fp16:
            mel = mel.half()

        if mel.shape[-2:] == (
                self.model.dims.n_audio_ctx,
                self.model.dims.n_audio_state,
        ):
            # encoded audio features are given; skip audio encoding
            audio_features = mel
        else:
            audio_features = self.model.encoder(mel)

        if audio_features.dtype != (
                torch.float16 if self.options.fp16 else torch.float32
        ):
            return TypeError(
                f"audio_features has an incorrect dtype: {audio_features.dtype}"
            )

        return audio_features

    def _detect_language(self, audio_features: Tensor, tokens: Tensor):
        """
        对输入的音频特征进行语言检测,
        此时传入tokens的目的就是覆写其中的language_id, 通过引用机制将数据带出函数
        需要注意到是, 如果option中已经手动定义了language, 则原则上以手动设置的为主,
        仍然会返回语言概率, 但是不在更改tokens
        """
        languages = [self.options.language] * audio_features.shape[0]
        """
        初始化设置languages为在options中指定的语言, 但并不一定就是这种语言
        """

        lang_probs = None
        if self.options.language is None or self.options.task == "lang_id":
            lang_tokens, lang_probs = self.model.detect_language(
                audio_features, self.tokenizer
            )
            """
            此时返回的lang_tokens为: (n_audio,)
            lang_probs: (n_audio, dict{key:lang_code, value:prob})
            """
            languages = [max(probs, key=probs.get) for probs in lang_probs]
            if self.options.language is None:
                tokens[:, self.sot_index + 1] = lang_tokens  # write language tokens
            """
            从tokenizer.py的代码中可以知道, 在sot_seq中, sot后面紧跟的就是languageid, 当为多语种状态时,
            tokenizer需要一个language参数, 他仅仅是一个起到占位符作用的初始值, 放置到sot_seq中, 此时通过检测
            语言, 修改tokens中的sot_seq中的language_id值
            """

        return languages, lang_probs

    def _main_loop(self, audio_features: Tensor, tokens: Tensor):
        """
        主循环
        audio_feature: (n_batch, n_audio_ctx, n_state)
        tokens: (n_batch, n_ctx)
        return:
            tokens: tensor(n_batch, init_lens+sample_len), 当实际长度小于它时,后面会接一堆eot
            sum_logprobs: tensor(n_batch)
            no_speech_probs: list,(n_batch)
        """
        n_batch = tokens.shape[0]
        sum_logprobs: Tensor = torch.zeros(n_batch, device=audio_features.device)
        no_speech_probs = [np.nan] * n_batch

        try:
            for i in range(self.sample_len):
                logits = self.inference.logits(tokens, audio_features)

                if (
                        i == 0 and self.tokenizer.no_speech is not None
                ):  # save no_speech_probs,
                    probs_at_sot = logits[:, self.sot_index].float().softmax(dim=-1)
                    no_speech_probs = probs_at_sot[:, self.tokenizer.no_speech].tolist()
                    """
                    i==0时代表0时刻, 这个时刻是现实意义的0时刻, 是为了做初始准备的,
                    此时得到的logits的序列长度并不为1, 而是sot_seq的长度,具体原理参见logits()函数
                    在初始序列中, 我们认为sot这个token具有特殊意义,它经过前向传播后承载的概率分布便是
                    我们所需要的
                    """

                # now we need to consider the logits at the last token only
                logits = logits[:, -1]

                # apply the logit filters, e.g. for suppressing or applying penalty to
                for logit_filter in self.logit_filters:
                    logit_filter.apply(logits, tokens)

                # expand the tokens tensor with the selected next tokens
                tokens, completed = self.decoder.update(tokens, logits, sum_logprobs)

                if completed or tokens.shape[-1] > self.n_ctx:
                    break
                """
                这里的complete是一个bool值, 只有全部完成才为true
                """
        finally:
            self.inference.cleanup_caching()

        return tokens, sum_logprobs, no_speech_probs

    @torch.no_grad()
    def run(self, mel: Tensor) -> List[DecodingResult]:
        """
        The real decoding entry point: given a batch of mel spectrograms,
        return one DecodingResult per audio segment.
        """
        self.decoder.reset()
        tokenizer: Tokenizer = self.tokenizer
        n_audio: int = mel.shape[0]
        """
        n_audio is the number of audio segments, i.e. the batch size; the
        context length is still bounded by a fixed model constant.
        """

        audio_features: Tensor = self._get_audio_features(mel)  # encoder forward pass
        tokens: Tensor = torch.tensor([self.initial_tokens]).repeat(n_audio, 1)
        """
        Build the initial token sequence and replicate it to shape
        (n_audio, init_token_length).
        """

        # detect language if requested, overwriting the language token
        languages, language_probs = self._detect_language(audio_features, tokens)
        if self.options.task == "lang_id":
            return [
                DecodingResult(
                    audio_features=features, language=language, language_probs=probs
                )
                for features, language, probs in zip(
                    audio_features, languages, language_probs
                )
            ]
        """
        For a pure language-identification task, return right after language
        detection; each result only needs to carry:
        audio_features: (n_audio, n_audio_ctx, n_audio_state)
        language: (n_audio,)
        language_probs: (n_audio, dict{key:lang_code, value:prob})
        """

        # repeat text tensors by the group size, for beam search or best-of-n sampling
        tokens = tokens.repeat_interleave(self.n_group, dim=0).to(audio_features.device)

        # call the main sampling loop
        tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features, tokens)

        # reshape the tensors to have (n_audio, n_group) as the first two dimensions
        audio_features = audio_features[:: self.n_group]
        no_speech_probs = no_speech_probs[:: self.n_group]
        assert audio_features.shape[0] == len(no_speech_probs) == n_audio

        tokens = tokens.reshape(n_audio, self.n_group, -1)
        sum_logprobs: Tensor = sum_logprobs.reshape(n_audio, self.n_group)

        # get the final candidates for each group, and slice between the first sampled token and EOT
        tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs)
        # keep only the generated tokens: drop the initial prompt (up to
        # sample_begin) and everything from the first <eot> onward
        tokens: List[List[Tensor]] = [
            [t[self.sample_begin: (t == tokenizer.eot).nonzero()[0, 0]] for t in s]
            for s in tokens
        ]

        # select the top-ranked sample in each group
        selected = self.sequence_ranker.rank(tokens, sum_logprobs)
        tokens: List[List[int]] = [t[i].tolist() for i, t in zip(selected, tokens)]
        texts: List[str] = [tokenizer.decode(t).strip() for t in tokens]

        sum_logprobs: List[float] = [lp[i] for i, lp in zip(selected, sum_logprobs)]
        # NOTE(review): the +1 in the denominator presumably accounts for the
        # <eot> token excluded by the slicing above — confirm against upstream
        avg_logprobs: List[float] = [
            lp / (len(t) + 1) for t, lp in zip(tokens, sum_logprobs)
        ]

        # all per-audio result lists must agree in length before zipping
        fields = (
            texts,
            languages,
            tokens,
            audio_features,
            avg_logprobs,
            no_speech_probs,
        )
        if len(set(map(len, fields))) != 1:
            raise RuntimeError(f"inconsistent result lengths: {list(map(len, fields))}")

        return [
            DecodingResult(
                audio_features=features,
                language=language,
                tokens=tokens,
                text=text,
                avg_logprob=avg_logprob,
                no_speech_prob=no_speech_prob,
                temperature=self.options.temperature,
                compression_ratio=compression_ratio(text),
            )
            for text, language, tokens, features, avg_logprob, no_speech_prob in zip(
                *fields
            )
        ]


@torch.no_grad()
def decode(
        model: "Whisper",
        mel: Tensor,
        options: DecodingOptions = DecodingOptions(),
        **kwargs,
) -> Union[DecodingResult, List[DecodingResult]]:
    """
    Decode 30-second audio segment(s), provided as Mel spectrogram(s), into text.

    Parameters
    ----------
    model: Whisper
        the Whisper model instance

    mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
        A tensor containing the Mel spectrogram(s)

    options: DecodingOptions
        A dataclass that contains all necessary options for decoding 30-second segments_task

    Returns
    -------
    result: Union[DecodingResult, List[DecodingResult]]
        The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
    """
    # A 2-D input is a single spectrogram; promote it to a batch of one so the
    # task always sees a batch, and remember to unwrap the result at the end.
    single = mel.ndim == 2
    if single:
        mel = mel.unsqueeze(0)

    # Any keyword overrides are folded into a fresh options dataclass.
    if kwargs:
        options = replace(options, **kwargs)

    results = DecodingTask(model, options).run(mel)

    if single:
        return results[0]
    return results
