import torch
import json
import tiktoken
from tiktoken.load import load_tiktoken_bpe


class partial_func:
    """Bind some arguments to a function up front so it can be passed
    around as a plain callable.

    Equivalent to functools.partial: bound positional arguments come
    first, bound keyword arguments can be overridden at call time.
    """
    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __call__(self, *args, **kwargs):
        # Later keyword arguments win over the ones bound at construction.
        merged_kwargs = dict(self.kwargs)
        merged_kwargs.update(kwargs)
        return self.func(*(self.args + args), **merged_kwargs)


class nn_layer:
    """A named pipeline of callables acting as one network layer.

    Each callable's output becomes the next callable's input, so a bare
    function can be wrapped as a layer too.  When the data flowing in or
    out is a torch.Tensor, its shape is recorded for display.
    """
    def __init__(self, layer:list, name:str, input_shape='unknown', output_shape='unknown'):
        self.name = name
        self.layer = layer
        self.input_shape = input_shape
        self.output_shape = output_shape


    def __repr__(self):
        return "{} >>> [{}] >>> {}".format(self.input_shape, self.name, self.output_shape)

    def __call__(self, tensor):
        # Remember the incoming shape when we can observe it.
        if isinstance(tensor, torch.Tensor):
            self.input_shape = tensor.shape

        result = tensor
        for stage in self.layer:
            result = stage(result)

        # Remember the outgoing shape when we can observe it.
        if isinstance(result, torch.Tensor):
            self.output_shape = result.shape
        return result


class LLM:
    """A from-scratch Llama-3 style decoder that runs greedy inference
    directly from the released checkpoint files (no torch.nn modules).
    """
    def __init__(self, model_path:str, device='cpu', name=''):
        """Load the tokenizer, the weights and the hyper-parameters.

        Args:
            model_path: directory holding tokenizer.model,
                consolidated.00.pth and params.json (a trailing '/' is
                appended when missing)
            device: torch device string, e.g. 'cpu' or 'cuda'
            name: optional prefix for the tokenizer's name
        """
        if model_path[-1] != '/':
            model_path += '/'

        self.device = torch.device(device)


        # Build the tokenizer: the 256 special tokens are appended after the
        # regular BPE vocabulary loaded from tokenizer.model.
        special_tokens = [
                             "<|begin_of_text|>",
                             "<|end_of_text|>",
                             "<|reserved_special_token_0|>",
                             "<|reserved_special_token_1|>",
                             "<|reserved_special_token_2|>",
                             "<|reserved_special_token_3|>",
                             "<|start_header_id|>",
                             "<|end_header_id|>",
                             "<|reserved_special_token_4|>",
                             "<|eot_id|>",  # end of turn
                         ] + [f"<|reserved_special_token_{i}|>" for i in range(5, 256 - 5)]
        mergeable_ranks = load_tiktoken_bpe(model_path + 'tokenizer.model')
        self.tokenizer = tiktoken.Encoding(
            name=f'{name}-tokenizer' if name else 'tokenizer',
            pat_str=r"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+",
            mergeable_ranks=mergeable_ranks,
            special_tokens={token: len(mergeable_ranks) + i for i, token in enumerate(special_tokens)},
        )

        # Load the weight file straight onto the target device.
        self.weights = torch.load(model_path + "consolidated.00.pth", weights_only=True, map_location=self.device)


        # Load the hyper-parameters.
        with open(model_path + "params.json", "r") as f:
            self.config = json.load(f)
            self.config["rope_theta"] = torch.tensor(self.config["rope_theta"])

        # Dimension of one attention head, derived from the query projection
        # and the head count; in practice a constant 128 for every layer, but
        # derived here to show where it comes from.
        self.head_dim = self.weights['layers.0.attention.wq.weight'].shape[0] // self.config['n_heads']

        # Number of complex pairs each q/k head vector splits into for RoPE.
        pairs_num = self.head_dim // 2
        # RoPE base frequencies used when rotating q and k: a geometric
        # progression theta ** (-i / pairs_num) over the pairs.
        # BUGFIX: the previous linear formula (range / pairs_num / theta)
        # does not match Llama's rotary embedding; the frequencies are also
        # moved onto the target device so RoPE works on CUDA.
        self.freqs = (1.0 / (self.config["rope_theta"] ** (torch.arange(pairs_num) / pairs_num))).to(self.device)


        # Wire the model together as a pipeline of nn_layer objects.
        self.encode_layer = nn_layer(layer=[self.encode], name='encode_layer', input_shape='prompt')
        self.embedding_layer = nn_layer(layer=[self.embedding], name='embedding_layer')

        self.all_transformer_layers = nn_layer(name='all_transformer_layers',
            layer=[
                nn_layer(
                    name=f'transformer_layer_{layer_id}',
                    layer=[partial_func(self.attention, layer_id=layer_id), partial_func(self.feed_forward, layer_id=layer_id)]
                )
                for layer_id in range(self.config["n_layers"])
            ])

        self.linear_layer = nn_layer(layer=[self.final_mapping], name='final_mapping_layer')

        self.tensor_to_token_layers = nn_layer(name='tensor_to_token_layers', input_shape='unknown', output_shape='int',
            layer=[
                self.all_transformer_layers,
                partial_func(self.RMS_norm, weight_key="norm.weight"),
                self.linear_layer
            ]
        )

    def RMS_norm(self, tensor:torch.Tensor, weight_key:str, dim=-1, norm_eps=None):
        """RMS-normalize one dimension of a tensor and scale it by a weight.

        Args:
            tensor: input tensor
            weight_key: key of the learned scale in self.weights
            dim: which dimension to normalize over, default the last one
            norm_eps: small constant guarding against division by zero;
                defaults to the checkpoint's configured value
        """
        if norm_eps is None:
            norm_eps = self.config['norm_eps']
        return (tensor * torch.rsqrt(tensor.pow(2).mean(dim, keepdim=True) + norm_eps)) * self.weights[weight_key]

    def encode(self, prompt:str) -> torch.Tensor:
        """Encode a prompt string into a 1-D tensor of token ids."""
        tokens = self.tokenizer.encode(prompt)
        return torch.tensor(tokens, device=self.device)

    def decode(self, tokens:int | list[int] | torch.Tensor) -> str | list[str]:
        """Decode one token id or a group of them.

        Returns a single string for an int or 0-d tensor, a list of
        per-token strings for a list/tuple, and a one-element list holding
        the whole decoded text for a 1-d tensor.
        """
        if isinstance(tokens, int):
            return self.tokenizer.decode([tokens])
        elif isinstance(tokens, (tuple, list)):
            # BUGFIX: tiktoken's decode() takes a list of ids, not a bare int.
            return [self.tokenizer.decode([token]) for token in tokens]
        elif isinstance(tokens, torch.Tensor):
            if tokens.shape == torch.Size([]):
                return self.tokenizer.decode([tokens.item()])
            else:
                return [self.tokenizer.decode(tokens.tolist())]
        raise ValueError(f'无法解码 {tokens}')

    def embedding(self, tokens:int | list[int] | torch.Tensor) -> torch.Tensor:
        """Embed one token id or a group of them via the embedding table."""
        if isinstance(tokens, int):
            return self.weights["tok_embeddings.weight"][tokens]
        elif isinstance(tokens, (tuple, list)):
            # BUGFIX: torch.tensor() cannot be built from a list of tensors;
            # stack the per-token embedding rows instead.
            return torch.stack([self.weights["tok_embeddings.weight"][token] for token in tokens]).to(self.device)
        elif isinstance(tokens, torch.Tensor):
            embeddings = [self.weights["tok_embeddings.weight"][token].unsqueeze(0) for token in tokens]
            # concatenate the per-token embeddings into one (len, dim) tensor
            embeddings = torch.cat(embeddings, dim=0).to(self.device)
            return embeddings

        raise ValueError(f'无法嵌入 {tokens}')

    def attention(self, tensor:torch.Tensor, layer_id:int) -> torch.Tensor:
        """One causal grouped-query attention block with residual connection.

        Args:
            tensor: input activations, not yet normalized, (seq_len, dim)
            layer_id: index of the transformer layer whose weights to use
        """
        # Load the projections and split them per head.  key and value share
        # one layout, query has more heads (grouped-query attention), output
        # needs no split.
        wq = self.weights[f"layers.{layer_id}.attention.wq.weight"]                 # torch.Size([4096, 4096])
        wq = wq.view(self.config["n_heads"], self.head_dim, self.config["dim"])     # torch.Size([32, 128, 4096])

        wk = self.weights[f"layers.{layer_id}.attention.wk.weight"]                 # torch.Size([1024, 4096])
        wk = wk.view(self.config["n_kv_heads"], self.head_dim, self.config["dim"])  # torch.Size([8, 128, 4096])

        wv = self.weights[f"layers.{layer_id}.attention.wv.weight"]                 # torch.Size([1024, 4096])
        wv = wv.view(self.config["n_kv_heads"], self.head_dim, self.config['dim'])  # torch.Size([8, 128, 4096])

        wo = self.weights[f"layers.{layer_id}.attention.wo.weight"]  # torch.Size([4096, 4096])

        def RoPE(q_or_k_per_token):  # torch.Size([x, 128])
            """Rotary position embedding: rotate every complex pair of a q/k
            vector by an angle proportional to its token position."""
            split_into_pairs = q_or_k_per_token.float().view(q_or_k_per_token.shape[0], -1, 2)  # torch.Size([x, 64, 2])
            view_as_complex = torch.view_as_complex(split_into_pairs)       # torch.Size([x, 64])

            # BUGFIX: position indices must live on the same device as
            # self.freqs, otherwise torch.outer / torch.polar fail on CUDA.
            positions = torch.arange(q_or_k_per_token.shape[0], device=self.device)
            freqs_for_each_token = torch.outer(positions, self.freqs)                           # torch.Size([x, 64])
            freqs_cis = torch.polar(torch.ones_like(freqs_for_each_token), freqs_for_each_token)  # torch.Size([x, 64])

            split_into_pairs_rotated = torch.view_as_real(view_as_complex * freqs_cis)  # torch.Size([x, 64, 2])

            return split_into_pairs_rotated.view(q_or_k_per_token.shape)  # torch.Size([x, 128])

        def up_triangle_mask(tensor:torch.Tensor):
            """Add -inf above the diagonal so softmax yields a causal
            (lower-triangular) attention pattern:
            1 0 0
            1 1 0
            1 1 1
            """
            mask = torch.full((tensor.shape[0], tensor.shape[1]), float("-inf"), device=tensor.device)
            mask = torch.triu(mask, diagonal=1)
            return tensor + mask

        normalized_tensor = self.RMS_norm(tensor, weight_key=f"layers.{layer_id}.attention_norm.weight")

        # How many query heads share each key/value head (4 for Llama-3-8B);
        # generalized from the previously hard-coded `head // 4`.
        q_per_kv = self.config["n_heads"] // self.config["n_kv_heads"]

        qkv_attention_store = []  # per-head attention outputs
        # Compute attention independently for every query head.
        for head in range(self.config['n_heads']):
            q_per_token = torch.matmul(normalized_tensor, wq[head].T)  # torch.Size([x, 128]), same for the next 4 lines
            q_per_token_rotated = RoPE(q_per_token)
            k_per_token = torch.matmul(normalized_tensor, wk[head // q_per_kv].T)
            k_per_token_rotated = RoPE(k_per_token)

            # Self-attention map: how strongly each token's query relates to
            # each token's key.
            qk_per_token = torch.matmul(q_per_token_rotated, k_per_token_rotated.T) / self.head_dim ** 0.5  # torch.Size([x, x])
            masked_qk_per_token = up_triangle_mask(qk_per_token)
            masked_qk_per_token_after_softmax = torch.nn.functional.softmax(masked_qk_per_token, dim=1).to(torch.bfloat16)

            v_per_token = torch.matmul(normalized_tensor, wv[head // q_per_kv].T)  # torch.Size([x, 128])

            # Weighted sum of the values: the full attention for this head.
            qkv_attention = torch.matmul(masked_qk_per_token_after_softmax, v_per_token)  # torch.Size([x, 128])
            qkv_attention_store.append(qkv_attention)
        # Concatenate every head's result back to the model dimension.
        stacked_qkv_attention = torch.cat(qkv_attention_store, dim=-1)  # torch.Size([x, 4096])

        # Final linear output projection, then the residual connection.
        delta = torch.matmul(stacked_qkv_attention, wo.T)  # torch.Size([x, 4096])

        return tensor + delta

    def feed_forward(self, tensor:torch.Tensor, layer_id:int) -> torch.Tensor:
        """SwiGLU feed-forward block with residual connection."""
        w1 = self.weights[f"layers.{layer_id}.feed_forward.w1.weight"]  # torch.Size([14336, 4096])
        w2 = self.weights[f"layers.{layer_id}.feed_forward.w2.weight"]  # torch.Size([4096, 14336])
        w3 = self.weights[f"layers.{layer_id}.feed_forward.w3.weight"]  # torch.Size([14336, 4096])

        normalized_tensor = self.RMS_norm(tensor, weight_key=f"layers.{layer_id}.ffn_norm.weight")

        fc_up = torch.matmul(normalized_tensor, w3.T)  # torch.Size([x, 14336])
        # canonical path for silu (was the accidental alias torch.functional.F)
        fc_gate = torch.nn.functional.silu(torch.matmul(normalized_tensor, w1.T))  # torch.Size([x, 14336])
        delta = torch.matmul(fc_gate * fc_up, w2.T)  # torch.Size([x, 4096])

        return tensor + delta  # torch.Size([x, 4096])

    def final_mapping(self, tensor:torch.Tensor) -> int:
        """Project the last position onto the vocabulary and pick the argmax
        token id (greedy decoding)."""
        logits = torch.matmul(tensor[-1], self.weights["output.weight"].T)
        return torch.argmax(logits, dim=-1).item()

    def next_token(self, tensor:torch.Tensor) -> int:
        """Predict the next token id from an embedded sequence."""
        return self.tensor_to_token_layers(tensor)

    def generate(self, prompt):  # TODO: modify
        """Greedily generate from *prompt* until <|end_of_text|> (id 128001),
        streaming each new token to stdout; returns the generated text."""
        tokens = self.encode_layer(prompt)
        embedding_tensor = self.embedding_layer(tokens)
        input_len = tokens.shape[0]

        tensor = embedding_tensor
        while tokens[-1] != 128001:  # 128001 == <|end_of_text|>
            new_token = self.tensor_to_token_layers(tensor)
            tokens = torch.cat((tokens, torch.tensor([new_token], device=self.device)))
            tensor = torch.cat((tensor, self.embedding(new_token).unsqueeze(0)), dim=0)
            print(self.decode(new_token), end='')
        return self.decode(tokens[input_len:])


# Build the model from the released Llama-3-8B checkpoint directory.
model = LLM(model_path="../Meta-Llama-3-8B/original/", device='cuda')
print('模型配置完成')


# Renamed from `input`, which shadowed the builtin of the same name.
prompt = "the answer to the ultimate question of life, the universe, and everything is "
# End-to-end pipeline: prompt -> token ids -> embeddings -> next token id
# -> decoded string (output_shape label fixed: model.decode returns a str).
prompt_to_an_answer_layer = nn_layer(name='prompt_to_an_answer_layer', input_shape='prompt', output_shape='str',
    layer=[
        model.encode_layer,
        model.embedding_layer,
        model.tensor_to_token_layers,
        model.decode
])

reply = prompt_to_an_answer_layer(prompt)

print(reply)

