import base64
import gzip
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch import Tensor
from .decoding import decode as decode_function
from .decoding import detect_language as detect_language_function
from .transcribe import transcribe as transcribe_function


@dataclass
class ModelDimensions:
    """Hyperparameters describing a Whisper model configuration."""

    n_mels: int  # number of mel-spectrogram bins in the input
    n_audio_ctx: int  # audio context length (after the encoder's stride-2 conv)
    n_audio_state: int  # encoder hidden size
    n_audio_head: int  # encoder attention heads
    n_audio_layer: int  # encoder layers
    n_vocab: int  # vocabulary size
    n_text_ctx: int  # maximum text (token) context length
    n_text_state: int  # decoder hidden size
    n_text_head: int  # decoder attention heads
    n_text_layer: int  # decoder layers


class LayerNorm(nn.LayerNorm):
    """LayerNorm that computes in float32 for numerical stability, then casts back to the input dtype."""

    def forward(self, x):
        orig_dtype = x.dtype
        normalized = super().forward(x.float())
        return normalized.type(orig_dtype)


class Linear(nn.Linear):
    """
    线性模块
    """

    def forward(self, x):
        return F.linear(
            x,
            self.weight.to(x.dtype),
            None if self.bias is None else self.bias.to(x.dtype),
        )


class Conv1d(nn.Conv1d):
    """Conv1d that matches its weight/bias dtype to the input dtype at call time."""

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
        cast_bias = bias if bias is None else bias.to(input.dtype)
        return super()._conv_forward(input, weight.to(input.dtype), cast_bias)


def sinusoids(length, channels, max_timescale=10000):
    """
    Return sinusoidal positional embeddings of shape (length, channels).

    The first channels//2 columns are sines, the remaining ones cosines, with
    timescales log-spaced between 1 and max_timescale. The default of 10000
    matches the reference Transformer encoding ("Attention Is All You Need");
    the previous default of 100000 diverged from it (extra zero, likely a typo).
    """
    assert channels % 2 == 0  # half sine / half cosine -> channels must be even
    # Step between successive log-timescales.
    log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
    inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2))
    # Outer product: (length, 1) * (1, channels//2) -> (length, channels//2)
    scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
    return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)


class MultiHeadAttention(nn.Module):
    """
    Multi-head attention used for both self-attention (xa is None) and
    cross-attention (xa holds the encoder output), with an optional kv cache.
    """

    def __init__(self, n_state: int, n_head: int):
        super().__init__()
        self.n_head = n_head
        self.query = Linear(n_state, n_state)
        # No bias on the key projection, matching the reference implementation.
        self.key = Linear(n_state, n_state, bias=False)
        self.value = Linear(n_state, n_state)
        self.out = Linear(n_state, n_state)

    def forward(self, x: Tensor, xa: Optional[Tensor] = None, kv_cache: Optional[dict] = None,
                mask: Optional[Tensor] = None):
        """
        x : torch.Tensor, shape = (batch_size, n_ctx1, n_state)
        xa : torch.Tensor, shape = (batch_size, n_audio_ctx, n_state) or None
        kv_cache : dict keyed by the self.key / self.value Linear modules,
            holding their cached outputs (populated by forward hooks)
        mask : torch.Tensor, shape = (n_ctx, n_ctx) additive self-attention mask
        returns:
        wv: (batch, n_ctx1, n_state), the attention-weighted output
        qk: (batch, head, n_ctx1, n_ctx2), pre-softmax attention logits (detached)
        """
        if kv_cache is not None and self.key in kv_cache and self.value in kv_cache and xa is not None:
            # Cross-attention with a populated cache: the audio features are fixed
            # across decoding steps, so reuse the cached k/v instead of
            # re-projecting xa. For self-attention the projections below still
            # run: the forward hooks installed on self.key / self.value
            # concatenate and return the cached sequence, so the cache benefits
            # both attention kinds — but only cross-attention can skip the
            # projection entirely, which is what this branch does.
            k = kv_cache[self.key]
            v = kv_cache[self.value]
        else:
            k, v = self.key(x if xa is None else xa), self.value(x if xa is None else xa)
        q = self.query(x)
        wv, qk = self.qkv_attention(q, k, v, mask)
        return self.out(wv), qk

    def qkv_attention(self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None):
        """
        q : torch.Tensor, shape = (batch_size, n_ctx1, n_state)
        k : torch.Tensor, shape = (batch_size, n_ctx2, n_state)
        v : torch.Tensor, shape = (batch_size, n_ctx2, n_state)
        mask : torch.Tensor, shape = (n_ctx1, n_ctx1) additive mask (self-attention only)
        returns:
        wv: (batch, n_ctx1, n_state), the attention-weighted output
        qk: (batch, head, n_ctx1, n_ctx2), pre-softmax logits, detached
        """
        batch_size, n_ctx1, n_state = q.shape
        # Scale is split across q and k (dim**-0.25 each) so their product
        # carries the standard dim**-0.5 attention scaling.
        scale = (n_state // self.n_head) ** -0.25
        # (batch, head, ctx1, little_state)
        q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) * scale
        # (batch, head, little_state, ctx2)
        k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 3, 1) * scale
        # (batch, head, ctx2, little_state)
        v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)
        qk = torch.matmul(q, k)  # (batch, head, ctx1, ctx2)
        # Only the first n_ctx1 rows/cols of the causal mask apply — i.e. masking
        # is a self-attention concern; cross-attention passes mask=None.
        if mask is not None:
            qk = qk + mask[:n_ctx1, :n_ctx1]
        qk = qk.float()
        qk_soft = torch.softmax(qk, dim=-1).to(q.dtype)
        wv = torch.matmul(qk_soft, v)  # (batch, head, ctx1, little_state)
        wv = wv.permute(0, 2, 1, 3).flatten(start_dim=2)  # (batch, ctx1, head * little_state)
        return wv, qk.detach()


class ResidualAttentionBlock(nn.Module):
    """
    Residual attention block with three sub-modules:
    1. self-attention; 2. optional cross-attention; 3. MLP (4x expansion).
    Cross-attention is enabled by the constructor flag. Each sub-module is
    preceded by LayerNorm (pre-norm) and wrapped in a residual connection.
    """

    def __init__(self, n_state: int, n_head: int, cross_attention: bool = False):
        super().__init__()
        self.attn = MultiHeadAttention(n_state, n_head)
        self.attn_ln = LayerNorm(n_state)
        self.cross_attn = MultiHeadAttention(n_state, n_head) if cross_attention else None
        self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
        n_mlp_latent = 4 * n_state
        self.mlp = nn.Sequential(
            Linear(n_state, n_mlp_latent), nn.GELU(), Linear(n_mlp_latent, n_state)
        )
        self.mlp_ln = LayerNorm(n_state)

    def forward(self, x, xa: Optional[Tensor] = None, mask: Optional[Tensor] = None, kv_cache: Optional[dict] = None):
        """
        The causal mask applies only to self-attention (cross-attention is
        unmasked), while the kv cache serves both self-attention and
        cross-attention.
        """
        x = self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)[0] + x
        if self.cross_attn is not None:
            x = self.cross_attn(self.cross_attn_ln(x), xa, kv_cache=kv_cache)[0] + x
        x = self.mlp(self.mlp_ln(x)) + x
        return x


class AudioEncoder(nn.Module):
    """
    Audio encoder: mel spectrogram -> sequence of audio features.

    Input:  (batch_size, n_mel, n_ctx_mel) mel frames
    Output: (batch_size, n_ctx, n_state) audio features

    Two convolutions project mels -> n_state (the second, with stride 2,
    halves the time axis), then a stack of self-attention blocks, then a
    final LayerNorm.
    """

    def __init__(self, n_mel: int, n_ctx: int, n_state: int, n_head: int, n_layer: int, ):
        super().__init__()
        self.conv1 = Conv1d(n_mel, n_state, kernel_size=3, padding=1)
        self.conv2 = Conv1d(n_state, n_state, kernel_size=3, padding=1, stride=2)
        self.blocks = nn.ModuleList(
            [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
        )
        self.ln_post = LayerNorm(n_state)
        # NOTE(review): the positional embedding is sized n_ctx even though conv2
        # halves the input length — so the caller must pass an n_ctx that already
        # equals the post-conv length (the assert in forward enforces this;
        # presumably two mel frames correspond to one audio token — confirm
        # against the tokenizer/feature pipeline).
        self.register_buffer("positional_embedding",
                             sinusoids(n_ctx, n_state))

    def forward(self, x: Tensor):
        """
        x : torch.Tensor, shape = (batch_size, n_mel, n_ctx_mel)
        returns (batch_size, n_ctx, n_state)
        """
        x = F.gelu(self.conv1(x))
        x = F.gelu(self.conv2(x))
        x = x.permute(0, 2, 1)  # (batch, time, channels)
        assert x.shape[1:] == self.positional_embedding.shape, "incorrect audio shape"
        x = (x + self.positional_embedding).to(x.dtype)
        for block in self.blocks:
            x = block(x)
        return self.ln_post(x)


def do_test_multi_head_attention():
    """Smoke test: run MultiHeadAttention on random tensors and print output shapes."""
    attn = MultiHeadAttention(512, 8)
    x = torch.randn(123, 10, 512)
    xa = torch.randn(123, 10, 512)
    attn_mask = torch.randn(10, 10)
    wv, qk = attn(x, xa, kv_cache={}, mask=attn_mask)
    print(wv.shape, qk.shape)


def do_test_audio_encoder():
    """Smoke test: encode a random mel batch and print the feature shape."""
    model = AudioEncoder(80, 1000, 512, 8, 2)
    # The stride-2 conv halves the time axis, so n_ctx=1000 requires 2000 mel
    # frames. (The previous 1000-frame input produced a length-500 feature and
    # tripped the "incorrect audio shape" assert in AudioEncoder.forward.)
    mel = torch.randn(123, 80, 2000)
    output = model(mel)
    print(output.shape)


class TextDecoder(nn.Module):
    """
    Text decoder: predicts token logits autoregressively, conditioned on the
    encoded audio features via cross-attention in each block.
    """

    def __init__(self, n_vocab: int, n_ctx: int, n_state: int, n_head: int, n_layer: int):
        super().__init__()
        self.token_embedding = nn.Embedding(n_vocab, n_state)
        # Learned positional embedding (unlike the encoder's fixed sinusoids).
        self.positional_embedding = nn.Parameter(torch.empty(n_ctx, n_state))
        self.blocks = nn.ModuleList(
            [ResidualAttentionBlock(n_state, n_head, cross_attention=True) for _ in range(n_layer)])
        self.ln = LayerNorm(n_state)
        self.register_buffer('mask', torch.empty(n_ctx, n_ctx).fill_(float("-inf")).triu_(diagonal=1),
                             persistent=False)  # triu: upper triangle excluding the diagonal -> additive causal mask

    def forward(self, x: torch.LongTensor, xa: Tensor, kv_cache: Optional[dict] = None):
        """
        x : torch.LongTensor, shape = (batch_size, n_ctx), token ids
        xa : torch.Tensor, shape = (batch_size, n_audio_ctx, state), audio features
        kv_cache : dict keyed by key/value Linear modules, holding cached activations
        return:
        logits : torch.Tensor, shape = (batch_size, n_ctx, n_vocab)
        """
        # With an active kv cache, x holds only the newest token(s); the number of
        # already-cached positions gives the offset into the positional embedding.
        offset = next(iter(kv_cache.values())).shape[1] if kv_cache else 0
        x = self.token_embedding(x) + self.positional_embedding[offset:offset + x.shape[-1]]
        x = x.to(xa.dtype)
        for attn in self.blocks:
            x = attn(x, xa, mask=self.mask, kv_cache=kv_cache)
        x = self.ln(x)
        # Weight tying: project back to the vocabulary with the embedding matrix.
        # x: (b, n_ctx, n_state), logits: (b, n_ctx, n_vocab)
        logits = torch.matmul(x, self.token_embedding.weight.to(x.dtype).T)
        return logits


def do_test_text_decoder():
    """Smoke test: run TextDecoder on random tokens/features and print the logits shape."""
    decoder = TextDecoder(51865, 20, 512, 8, 2)
    tokens = torch.randint(0, 51865, (123, 20))
    audio_feature = torch.randn(123, 20, 512)
    logits = decoder(tokens, audio_feature, None)
    print(logits.shape)


class Whisper(nn.Module):
    """
    Full Whisper model: AudioEncoder + TextDecoder, plus the high-level
    detect_language / transcribe / decode entry points bound from sibling modules.
    """

    detect_language = detect_language_function
    transcribe = transcribe_function
    decode = decode_function

    def __init__(self, dims: ModelDimensions):
        super().__init__()
        self.dims = dims
        self.encoder = AudioEncoder(dims.n_mels, dims.n_audio_ctx, dims.n_audio_state, dims.n_audio_head,
                                    dims.n_audio_layer)
        self.decoder = TextDecoder(dims.n_vocab, dims.n_text_ctx, dims.n_text_state, dims.n_text_head,
                                   dims.n_text_layer)
        # Default alignment heads: assume all heads in the second half of the
        # decoder layers; stored sparse and non-persistent (not in state_dict).
        all_head = torch.zeros(dims.n_text_layer, dims.n_text_head, dtype=torch.bool)
        all_head[dims.n_text_layer // 2:] = True
        self.register_buffer("alignment_heads", all_head.to_sparse_coo(), persistent=False)

    def set_alignment_heads(self, dump: bytes):
        """
        Replace the default alignment_heads with a mask decoded from `dump`:
        a base85-encoded, gzip-compressed boolean array that reshapes to
        (n_text_layer, n_text_head).
        """
        # Decode base85 -> decompress gzip -> read the raw bytes as a bool array.
        array = np.frombuffer(
            gzip.decompress(base64.b85decode(dump)), dtype=bool
        ).copy()

        mask = torch.from_numpy(array).reshape(
            self.dims.n_text_layer, self.dims.n_text_head
        )
        self.register_buffer("alignment_heads", mask.to_sparse(), persistent=False)

    def embed_audio(self, mel: Tensor):
        """
        mel: torch.Tensor, shape = (batch_size, n_mels, n_ctx)
        Encode the mel spectrogram into audio features.
        """
        return self.encoder(mel)

    def logits(self, tokens: Tensor, audio_feature: Tensor):
        """
        tokens: torch.Tensor, shape = (batch_size, n_ctx)
        audio_feature: torch.Tensor, shape = (batch_size, n_audio_ctx, n_audio_state)
        returns
        logits: torch.Tensor, shape = (batch_size, n_ctx, n_vocab)
        """
        return self.decoder(tokens, audio_feature)

    def forward(self, mels: Tensor, tokens: Tensor):
        audio_feature = self.embed_audio(mels)
        logits = self.logits(tokens, audio_feature)
        return logits

    @property
    def device(self):
        return next(self.parameters()).device

    @property
    def is_multilingual(self):
        # NOTE(review): equality against the exact multilingual vocab size;
        # vocabularies larger than 51865 would be reported as not multilingual —
        # confirm this matches the tokenizers in use.
        return self.dims.n_vocab == 51865

    def install_kv_cache_hooks(self, cache: Optional[dict] = None):
        """
        Install forward hooks on every key/value Linear module in the decoder so
        their outputs are cached; for token self-attention the cache is
        concatenated across autoregressive steps.

        Returns (cache, hooks): the cache dict keyed by module, and the hook
        handles so the caller can remove them when decoding finishes.
        """
        cache = {**cache} if cache is not None else {}
        hooks = []

        def save_to_cache(module: nn.Module, _, output: Tensor):
            """Cache (or extend) this module's output; the return value replaces it."""
            # First sighting of this module, or an output longer than the text
            # context (i.e. it was computed from the audio features in
            # cross-attention): store it as-is so later steps can reuse it.
            #
            # BUGFIX: this comparison was `!=` instead of `>`. With `!=`, the
            # 1-token outputs of incremental decoding never equal n_text_ctx, so
            # every step REPLACED the self-attention cache with just the newest
            # token's k/v instead of appending — breaking autoregression.
            if module not in cache or output.shape[1] > self.dims.n_text_ctx:
                cache[module] = output.detach()
            else:
                # Token self-attention during autoregressive decoding: append the
                # newest positions along the time axis.
                cache[module] = torch.cat([cache[module], output.detach()], dim=1).detach()
            # Returning a value from a forward hook replaces the module's output,
            # so downstream attention sees the full cached key/value sequence.
            return cache[module]

        def install_hooks(layer: nn.Module):
            if isinstance(layer, MultiHeadAttention):
                hooks.append(layer.value.register_forward_hook(save_to_cache))
                hooks.append(layer.key.register_forward_hook(save_to_cache))

        self.decoder.apply(install_hooks)
        return cache, hooks


def do_test_whisper():
    """Smoke test: end-to-end Whisper forward pass on random inputs."""
    model = Whisper(ModelDimensions(n_mels=80, n_audio_ctx=100, n_audio_state=12, n_audio_head=2, n_audio_layer=2,
                                    n_text_ctx=20, n_text_state=12, n_text_head=2, n_text_layer=2, n_vocab=12345))
    # n_audio_ctx=100 requires 200 mel frames: the encoder's stride-2 conv halves
    # the time axis. (The previous 100-frame input produced a length-50 feature
    # and tripped the "incorrect audio shape" assert in AudioEncoder.forward.)
    mels = torch.randn(123, 80, 200)
    tokens = torch.randint(0, 12345, (123, 20))
    logits = model(mels, tokens)
    print(logits.shape)


if __name__ == '__main__':
    # Run the end-to-end smoke test when executed as a script.
    do_test_whisper()