"""
Full definition of a GPT Language Model, all of it in this single file.
References:
1) the official GPT-2 TensorFlow implementation released by OpenAI:
https://github.com/openai/gpt-2/blob/master/src/model.py
2) huggingface/transformers PyTorch implementation:
https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py

训练过程：forward
推理过程：generate

**Transformer**
1. 训练过程
    encoder输入样本：B * T * C
    decoder输入标签：B * T * 1
    模型输出：B * T * vocab_size

    然后模型输出和标签求交叉熵损失

2. 推理过程，假设指定想要模型输出长度为T的结果
    因为transformer的encoder必须要输入原始语料，则：
    1) 先从原始语料中选取一段长度为T的文本，作为encoder输入：
    encoder输入样本：B * T * C，经过encoder，生成B * T * C的memory

    2) 进行推理
    对每次推理都使用1)中生成的`memory`，重复T次推理
        对第1次推理，encoder输入`memory`，decoder输入prompt，假设为换行符'\n'
        然后模型输出（取最后一个时间步）：B * 1 * vocab_size，与词汇表对照得到输出单词，对于character-level的模型，假设为'm'
        第2次encoder输入`memory`, decoder输入'\nm'，仍然取最后一个时间步并对照词汇表，假设输出'a'
        第3次encoder输入`memory`, decoder输入'\nma'，假设输出'c'
        第4次encoder输入`memory`, decoder输入'\nmac'，假设输出'h'
        ...
        第8次encoder输入`memory`, decoder输入'\nmachin'，假设输出'e'

        如果T等于8，则最终输出为'\nmachine'，总共8个字符


**GPT**
GPT仅包含了decoder架构
1. 训练过程
    输入样本：B * T * C
    标签：B * T * 1 (每个1都代表真实token)
    模型输出：B * T * vocab_size

    然后模型输出和标签求交叉熵损失

2. 推理过程，假设指定想要模型输出长度为T的结果

    重复T次推理
        对第1次推理，GPT输入prompt，假设为换行符'\n'
        然后GPT输出（取最后一个时间步）：B * 1 * vocab_size，与词汇表对照得到输出单词，对于character-level的模型，假设为'm'
        第2次GPT输入'\nm'，仍然取最后一个时间步并对照词汇表，假设输出'a'
        第3次GPT输入'\nma'，假设输出'c'
        第4次GPT输入'\nmac'，假设输出'h'
        ...
        第8次GPT输入'\nmachin'，假设输出'e'

        如果T等于8，则最终输出为'\nmachine'，总共8个字符


"""

import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from model_config import GPTConfig


def _init_weights(module):
    if isinstance(module, nn.Linear):
        torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.Embedding):
        torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)


class LayerNorm(nn.Module):
    """LayerNorm with an optional bias (nn.LayerNorm does not accept bias=False)."""

    def __init__(self, ndim, bias):
        super().__init__()
        # learnable scale; learnable shift only when bias is requested
        self.weight = nn.Parameter(torch.ones(ndim))
        self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None

    def forward(self, input):
        normalized_shape = self.weight.shape
        return F.layer_norm(input, normalized_shape, self.weight, self.bias, 1e-5)


class MultiHeadAttention(nn.Module):
    """
    Multi-head scaled dot-product attention.

    Works as self-attention (queries == keys == values) or cross-attention,
    and as causal (mask=True) or unmasked (mask=False) attention.
    """

    def __init__(self, config):
        super().__init__()
        assert hasattr(config, 'n_embd')
        # q/k/v input sizes and the hidden size all default to n_embd
        query_size = getattr(config, 'query_size', config.n_embd)
        key_size = getattr(config, 'key_size', config.n_embd)
        value_size = getattr(config, 'value_size', config.n_embd)
        num_hidden = getattr(config, 'num_hidden', config.n_embd)

        assert num_hidden % config.n_head == 0
        self.W_q = nn.Linear(query_size, num_hidden, bias=config.bias)  # query projection
        self.W_k = nn.Linear(key_size, num_hidden, bias=config.bias)  # key projection
        self.W_v = nn.Linear(value_size, num_hidden, bias=config.bias)  # value projection
        self.W_o = nn.Linear(num_hidden, num_hidden, bias=config.bias)  # output projection

        # regularization
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # flash attention make GPU go brr but support is only in PyTorch >= 2.0
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
        if not self.flash:
            print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
            # Causal-mask buffer kept for state_dict compatibility; the slow path
            # below rebuilds its mask per forward so it lands on the inputs' device.
            self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                 .view(1, 1, config.block_size, config.block_size))

    def transpose_qkv(self, X):
        """
        Reshape for parallel multi-head computation: split n_head out of the
        embedding dim and move it next to the batch dim.

        input X:  (batch_size, num of queries or key-value pairs (T), num_hidden)
        output:   (batch_size, n_head, T, num_hidden // n_head (hs))
        """
        X = X.reshape(X.shape[0], X.shape[1], self.n_head, -1)
        X = X.permute(0, 2, 1, 3)
        return X

    def forward(self, queries, keys, values, mask: bool = True):
        """
        Compute multi-head attention. Shapes (nq = num queries, nkv = num key-value
        pairs, hs = num_hidden // n_head; for self-attention nq == nkv):

        1. inputs: q (B, nq, query_size), k (B, nkv, key_size), v (B, nkv, value_size)
        2. W_q/W_k/W_v project each to (B, *, num_hidden)
        3. transpose_qkv -> (B, n_head, *, hs)
        4. attention: softmax(mask(q @ k^T / sqrt(hs))) @ v -> (B, n_head, nq, hs)
        5. heads merged back -> (B, nq, n_head * hs) == (B, nq, num_hidden)
        6. output projection W_o -> (B, nq, num_hidden)

        :param mask: apply a causal (lower-triangular) mask; requires nq == nkv.
        :return: (B, nq, num_hidden) attention output after residual dropout.
        """
        q = self.transpose_qkv(self.W_q(queries))  # (B, n_head, nq, hs)
        k = self.transpose_qkv(self.W_k(keys))     # (B, n_head, nkv, hs)
        v = self.transpose_qkv(self.W_v(values))   # (B, n_head, nkv, hs)

        if mask:  # causal attention; nq must equal nkv
            assert q.shape[2] == k.shape[2] and q.shape[2] == v.shape[2]
            if self.flash:
                # BUG FIX: the original passed BOTH attn_mask and is_causal=True,
                # which scaled_dot_product_attention rejects at runtime. is_causal
                # alone expresses the lower-triangular mask.
                y = torch.nn.functional.scaled_dot_product_attention(
                    q, k, v, attn_mask=None,
                    dropout_p=self.dropout if self.training else 0,
                    is_causal=True)  # (B, n_head, nq, hs)
            else:
                # manual implementation of attention
                T = q.shape[2]
                # BUG FIX: build the mask on the inputs' device (the original
                # created it on CPU, breaking GPU runs) and via a single tril
                # instead of the triu/fill_diagonal dance.
                causal = torch.tril(torch.ones(T, T, dtype=torch.bool, device=q.device))
                att = (q @ k.transpose(-2, -1)) * (
                        1.0 / math.sqrt(k.size(-1)))  # qk^T before mask, (B, n_head, nq, nkv)
                att = att.masked_fill(~causal, float('-inf'))  # masked qk^T
                att = F.softmax(att, dim=-1)  # (B, n_head, nq, nkv)
                att = self.attn_dropout(att)
                y = att @ v  # (B, n_head, nq, nkv) x (B, n_head, nkv, hs) -> (B, n_head, nq, hs)

        else:  # without mask
            if self.flash:
                # BUG FIX: the original passed is_causal=True here, silently
                # applying a causal mask on the supposedly unmasked path.
                y = torch.nn.functional.scaled_dot_product_attention(
                    q, k, v, attn_mask=None,
                    dropout_p=self.dropout if self.training else 0,
                    is_causal=False)  # (B, n_head, nq, hs)
            else:
                # manual implementation of attention
                att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))  # (B, n_head, nq, nkv)
                att = F.softmax(att, dim=-1)
                att = self.attn_dropout(att)
                y = att @ v  # (B, n_head, nq, hs)

        # BUG FIX: the original read T from y.shape[1], which is n_head here
        # (y is (B, n_head, nq, hs)), producing a wrong reshape whenever nq != n_head.
        B, T = y.shape[0], y.shape[2]
        # transpose moves the head dim back; contiguous() is required before view()
        # because transposed tensors are not contiguous in memory.
        y = y.transpose(1, 2).contiguous().view(B, T, -1)  # (B, nq, n_head * hs) == (B, nq, num_hidden)

        # output projection
        y = self.resid_dropout(self.W_o(y))
        return y


class MLP(nn.Module):
    """Position-wise feed-forward net: Linear -> GELU -> Linear -> Dropout,
    with the conventional 4x hidden expansion."""

    def __init__(self, config):
        super().__init__()
        hidden_dim = 4 * config.n_embd
        self.c_fc = nn.Linear(config.n_embd, hidden_dim, bias=config.bias)
        self.gelu = nn.GELU()
        self.c_proj = nn.Linear(hidden_dim, config.n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        # expand, apply non-linearity, project back, regularize
        return self.dropout(self.c_proj(self.gelu(self.c_fc(x))))


class TransformerBlock(nn.Module):
    """
    One transformer layer: LayerNorm'd self-attention, optional cross-attention
    (when ``memory`` is given, as in a decoder), then an MLP.
    Input is x; output is a tensor with the same shape as x.
    """

    def __init__(self, config):
        super().__init__()
        self.layer_norm_1 = LayerNorm(config.n_embd, bias=config.bias)  # the normalized shape is config.n_embd
        self.attn = MultiHeadAttention(config)
        self.layer_norm_2 = LayerNorm(config.n_embd, bias=config.bias)
        self.mlp = MLP(config)
        self.layer_norm_3 = LayerNorm(config.n_embd, bias=config.bias)

    def forward(self, x, memory=None, mask: bool = True):
        """
        :param x: (B, T, n_embd) input sequence.
        :param memory: optional (B, T, n_embd) encoder output; enables cross-attention.
        :param mask: causal-mask the self-attention (True for decoder use).
        :return: (B, T, n_embd) output.
        """
        # Every input of the dot-product attention goes through a layer-norm first.
        x = self.layer_norm_1(x)
        # self-attention: query, key and value are all derived from x
        x = x + self.attn(queries=x, keys=x, values=x, mask=mask)
        x = self.layer_norm_2(x)
        if memory is not None:  # cross-attention: keys/values come from the encoder memory
            assert memory.shape == x.shape
            # BUG FIX: the original passed `key=`/`v=`, which are not parameter
            # names of MultiHeadAttention.forward (keys=/values=) -> TypeError
            # on every cross-attention call.
            x = x + self.attn(queries=x, keys=memory, values=memory, mask=False)
            x = self.layer_norm_3(x)
        x = x + self.mlp(x)
        return x


class TransformerEncoder(nn.Module):
    """
    Transformer encoder stack: token + position embeddings, dropout, n_layer
    TransformerBlocks with UNMASKED self-attention, and a final LayerNorm.
    Maps (B, T) token ids to a (B, T, n_embd) memory tensor for the decoder.
    """

    def __init__(self, config):
        super().__init__()
        assert config.vocab_size is not None
        assert config.block_size is not None
        self.config = config

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)  # token embedding
        self.wpe = nn.Embedding(config.block_size, config.n_embd)  # position embedding
        self.drop = nn.Dropout(config.dropout)  # dropout
        self.h = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layer)])  # transformer blocks
        self.layer_norm = LayerNorm(config.n_embd, bias=config.bias)  # final layer norm

        # BUG FIX: the original tied self.wte.weight to self.lm_head.weight here,
        # but the encoder defines no lm_head (only TransformerDecoder does), so
        # construction raised AttributeError. Weight tying belongs in the decoder.

        # init all weights
        self.apply(_init_weights)
        # apply special scaled init to the residual projections, per GPT-2 paper
        for pn, p in self.named_parameters():
            if pn.endswith('c_proj.weight'):
                torch.nn.init.normal_(p, mean=0.0, std=0.02 / math.sqrt(2 * config.n_layer))

        # report number of parameters
        print("number of parameters from encoder: %.2fM" % (self.get_num_params() / 1e6,))

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        """
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            n_params -= self.wpe.weight.numel()
        return n_params

    def forward(self, input: torch.Tensor):
        """
        Encode token ids into memory.

        :param input: (B, T) LongTensor of token ids, T <= block_size.
        :return: (B, T, n_embd) encoded memory.
        """
        device = input.device
        b, t = input.size()
        assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
        pos = torch.arange(0, t, dtype=torch.long, device=device)  # shape (t,)

        tok_emb = self.wte(input)  # token embeddings, (B, T, n_embd)
        pos_emb = self.wpe(pos)  # position embeddings, (T, n_embd), broadcast over B
        x = self.drop(tok_emb + pos_emb)  # (B, T, n_embd)
        for block in self.h:
            x = block(x, mask=False)  # encoder self-attention is unmasked
        output = self.layer_norm(x)  # (B, T, n_embd)

        return output


class TransformerDecoder(nn.Module):
    """
    Transformer decoder stack.

    Embeds tokens and positions, runs n_layer TransformerBlocks with causal
    self-attention (plus cross-attention when ``memory`` is supplied), and
    projects to vocabulary logits through a weight-tied lm_head.
    """

    def __init__(self, config):
        super().__init__()
        assert config.vocab_size is not None
        assert config.block_size is not None
        self.config = config

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)  # token embedding
        self.wpe = nn.Embedding(config.block_size, config.n_embd)  # position embedding
        self.drop = nn.Dropout(config.dropout)
        self.h = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layer)])
        self.layer_norm = LayerNorm(config.n_embd, bias=config.bias)

        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # Weight tying between token embedding and output head.
        # Note: under torch.compile() this emits a (so far harmless) UserWarning
        # about tied weights. https://paperswithcode.com/method/weight-tying
        self.wte.weight = self.lm_head.weight

        # init all weights, then apply the GPT-2-paper scaled init to residual projections
        self.apply(_init_weights)
        for name, param in self.named_parameters():
            if name.endswith('c_proj.weight'):
                torch.nn.init.normal_(param, mean=0.0, std=0.02 / math.sqrt(2 * config.n_layer))

        print("number of parameters from decoder: %.2fM" % (self.get_num_params() / 1e6,))

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        The token embeddings stay counted: through weight tying they also serve
        as the final lm_head weights.
        """
        total = sum(param.numel() for param in self.parameters())
        return total - self.wpe.weight.numel() if non_embedding else total

    def forward(self, input, memory=None, target=None):
        """
        Run the decoder.

        :param input: (B, T) token ids, T <= block_size.
        :param memory: optional encoder output; adds cross-attention in each block.
        :param target: optional (B, T) labels; when given, the loss is computed.
        :return: (logits, loss) — logits are (B, T, vocab_size) when training,
                 (B, 1, vocab_size) at inference; loss is None at inference.
        """
        batch, seq_len = input.size()
        assert seq_len <= self.config.block_size, \
            f"Cannot forward sequence of length {seq_len}, block size is only {self.config.block_size}"
        positions = torch.arange(0, seq_len, dtype=torch.long, device=input.device)

        # embeddings: token (B, T, n_embd) + position (T, n_embd), then dropout
        x = self.drop(self.wte(input) + self.wpe(positions))
        for block in self.h:
            x = block(x, memory=memory, mask=True)
        x = self.layer_norm(x)  # (B, T, n_embd)

        if target is None:  # inference
            # mini-optimization: project only the last position; [-1] as a list
            # keeps the time dim -> (B, 1, vocab_size)
            return self.lm_head(x[:, [-1], :]), None

        # training: project every position and compute cross-entropy over the
        # flattened (B*T, vocab_size) logits vs (B*T,) targets
        logits = self.lm_head(x)  # (B, T, vocab_size)
        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target.view(-1),
                               ignore_index=-1)
        return logits, loss


class Transformer(nn.Module):
    """
    Vanilla (encoder-decoder) transformer: the encoder turns the source sequence
    into a memory tensor; the decoder attends to that memory while generating.
    """

    def __init__(self, config):
        super().__init__()
        # BUG FIX: generate() and crop_block_size() read self.config, which the
        # original never stored -> AttributeError on first use.
        self.config = config
        self.encoder = TransformerEncoder(config)
        self.decoder = TransformerDecoder(config)

    def forward(self, input: torch.Tensor, target: torch.Tensor, for_inference=False):
        """
        :param input: (B, T) token ids for the encoder.
        :param target: (B, T) token ids for the decoder; during training they
            also serve as the labels for the cross-entropy loss.
        :param for_inference: when True, skip the loss computation.
        :return: (logits, loss); loss is None when for_inference is True.
        """
        memory = self.encoder(input)
        decoder_target = None if for_inference else target
        return self.decoder(input=target, memory=memory, target=decoder_target)

    @torch.no_grad()
    def generate(self, source, input, max_new_tokens, temperature=1.0, top_k=None):
        """
        Autoregressive (expanding-window) decoding against a fixed encoder memory.

        Each step feeds the running decoder sequence in, takes the last-position
        logits (B, 1, vocab_size), samples one token, and appends it; after
        max_new_tokens steps the grown sequence is returned. For a character-level
        model each sampled token is one character.

        :param source: encoder input ids (B, T).
        :param input: decoder prompt ids (B, T0), e.g. a single '\n' token.
        :param max_new_tokens: number of tokens to sample.
        :param temperature: logits are divided by this before softmax.
        :param top_k: if set, restrict sampling to the k most likely tokens.
        :return: input with max_new_tokens sampled ids appended.
        """
        memory = self.encoder(source)  # memory is computed once and reused every step
        for _ in range(max_new_tokens):
            # if the sequence context is growing too long we must crop it at block_size
            input = input if input.size(1) <= self.config.block_size else input[:, -self.config.block_size:]
            logits, _ = self.decoder(input=input, memory=memory, target=None)

            # pluck the logits at the final step and scale by desired temperature
            logits = logits[:, -1, :] / temperature
            # optionally crop the logits to only the top k options
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')  # -inf positions become 0 after softmax
            # softmax -> probabilities, sample one token, append and continue
            probs = F.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)
            input = torch.cat((input, next_token), dim=1)

        return input

    def crop_block_size(self, block_size):
        # model surgery to decrease the block size if necessary
        # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024)
        # but want to use a smaller block size for some smaller, simpler model
        assert block_size <= self.config.block_size
        self.config.block_size = block_size
        # BUG FIX: crop the position embeddings of BOTH encoder and decoder;
        # the original only cropped the decoder's.
        self.encoder.wpe.weight = nn.Parameter(self.encoder.wpe.weight[:block_size])
        self.decoder.wpe.weight = nn.Parameter(self.decoder.wpe.weight[:block_size])

        for module in (self.encoder, self.decoder):
            for block in module.h:
                if hasattr(block.attn, 'bias'):
                    block.attn.bias = block.attn.bias[:, :, :block_size, :block_size]


class GPT(TransformerDecoder):
    """
    GPT is simply a transformer-decoder architecture (no encoder, no
    cross-attention): causal self-attention blocks over token + position
    embeddings, followed by a weight-tied language-model head.
    """

    def __init__(self, config):
        super().__init__(config)
        assert config.vocab_size is not None
        assert config.block_size is not None
        self.config = config

    def forward(self, input, target=None):
        """
        Run the decoder-only language model.

        Training (target given):
            input:  (B, T) token ids; target: (B, T) ids of each token's
            successor in the corpus.
            Pipeline: (B, T) --embedding--> (B, T, n_embd) --blocks-->
            (B, T, n_embd) --lm_head--> (B, T, vocab_size); cross-entropy is
            taken against the flattened targets (one loss term per position).

        Inference (target is None):
            same pipeline, but only the last time step is projected, so the
            logits have shape (B, 1, vocab_size) and loss is None.

        :return: (logits, loss)
        """
        # GPT is exactly TransformerDecoder.forward without cross-attention
        # (the original duplicated that body verbatim) — delegate instead.
        return super().forward(input, memory=None, target=target)

    def crop_block_size(self, block_size):
        # model surgery to decrease the block size if necessary
        # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024)
        # but want to use a smaller block size for some smaller, simpler model
        assert block_size <= self.config.block_size
        self.config.block_size = block_size
        # BUG FIX: GPT *is* the decoder; the original referenced self.decoder.wpe,
        # an attribute GPT does not have -> AttributeError.
        self.wpe.weight = nn.Parameter(self.wpe.weight[:block_size])
        for block in self.h:
            if hasattr(block.attn, 'bias'):
                block.attn.bias = block.attn.bias[:, :, :block_size, :block_size]

    @classmethod
    def from_pretrained(cls, model_type, override_args=None):
        """Load GPT-2 weights from a huggingface/transformers checkpoint.

        NOTE(review): the key names handled below ('attn.c_attn.weight', ...)
        follow the HF/nanoGPT module layout, while this model names its attention
        projections W_q/W_k/W_v/W_o — so the key-count assert is expected to fail
        until a proper key mapping (including splitting c_attn into q/k/v) is
        added. TODO: confirm and port the mapping.
        """
        assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
        override_args = override_args or {}  # default to empty dict
        # only dropout can be overridden see more notes below
        assert all(k == 'dropout' for k in override_args)
        from transformers import GPT2LMHeadModel
        print("loading weights from pretrained gpt: %s" % model_type)

        # n_layer, n_head and n_embd are determined from model_type
        config_args = {
            'gpt2': dict(n_layer=12, n_head=12, n_embd=768),  # 124M params
            'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024),  # 350M params
            'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280),  # 774M params
            'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600),  # 1558M params
        }[model_type]
        print("forcing vocab_size=50257, block_size=1024, bias=True")
        config_args['vocab_size'] = 50257  # always 50257 for GPT model checkpoints
        config_args['block_size'] = 1024  # always 1024 for GPT model checkpoints
        config_args['bias'] = True  # always True for GPT model checkpoints
        # we can override the dropout rate, if desired
        if 'dropout' in override_args:
            print(f"overriding dropout rate to {override_args['dropout']}")
            config_args['dropout'] = override_args['dropout']
        # create a from-scratch initialized minGPT model
        config = GPTConfig(**config_args)
        model = GPT(config)
        sd = model.state_dict()
        sd_keys = sd.keys()
        sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')]  # discard this mask / buffer, not a param

        # init a huggingface/transformers model
        model_hf = GPT2LMHeadModel.from_pretrained(model_type)
        sd_hf = model_hf.state_dict()

        # copy while ensuring all the parameters are aligned and match in names and shapes
        sd_keys_hf = sd_hf.keys()
        sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')]  # ignore these, just a buffer
        sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')]  # same, just the mask (buffer)
        transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
        # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
        # this means that we have to transpose these weights when we import them
        assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
        for k in sd_keys_hf:
            if any(k.endswith(w) for w in transposed):
                # special treatment for the Conv1D weights we need to transpose
                assert sd_hf[k].shape[::-1] == sd[k].shape
                with torch.no_grad():
                    sd[k].copy_(sd_hf[k].t())
            else:
                # vanilla copy over the other parameters
                assert sd_hf[k].shape == sd[k].shape
                with torch.no_grad():
                    sd[k].copy_(sd_hf[k])

        return model

    @torch.no_grad()
    def generate(self, input, max_new_tokens, temperature=1.0, top_k=None):
        """
        Take a conditioning sequence of indices (LongTensor of shape (b, t)) and
        complete it max_new_tokens times, feeding the predictions back into the
        model each time (expanding-window inference).

        Each step: forward the running sequence, take the last-position logits
        (B, 1, vocab_size), apply temperature / optional top-k, sample one token
        from the softmax distribution and append it. For a character-level model
        the prompt is typically a single '\n' and each token is one character.

        Most likely you'll want to be in model.eval() mode for this.
        """
        for _ in range(max_new_tokens):
            # if the sequence context is growing too long we must crop it at block_size
            input = input if input.size(1) <= self.config.block_size else input[:, -self.config.block_size:]
            # forward pass; inference path returns logits only for the last position
            logits, _ = self(input)

            # pluck the logits at the final step and scale by desired temperature
            logits = logits[:, -1, :] / temperature
            # optionally crop the logits to only the top k options
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')
            # apply softmax to convert logits to (normalized) probabilities
            probs = F.softmax(logits, dim=-1)
            # sample from the distribution
            next_token = torch.multinomial(probs, num_samples=1)
            # append sampled index to the running sequence and continue
            input = torch.cat((input, next_token), dim=1)

        return input
