"""
可跳层模型：基于原始LLM实现跳层推理功能
"""
import torch

class SkipableModel_:
    """Wrapper that runs a causal LM while optionally skipping decoder layers.

    Expects an HF-style model exposing ``model.model.embed_tokens``,
    ``model.model.layers``, ``model.model.rotary_emb``, ``model.model.norm``
    and ``model.lm_head`` (e.g. Llama-family architectures) — TODO confirm
    against the actual model class used by callers.
    """

    def __init__(self, model, tokenizer):
        """Store the model/tokenizer pair and cache device and layer count.

        Args:
            model: pretrained causal language model.
            tokenizer: tokenizer matching ``model``.
        """
        self.model = model
        self.tokenizer = tokenizer
        self.device = model.device
        self.num_layers = model.config.num_hidden_layers

    def full_inference(self, prompt):
        """Greedy-predict the next token using every layer of the model.

        Args:
            prompt: input prompt text.

        Returns:
            str: the predicted next token, whitespace-stripped.
        """
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        input_ids = inputs.input_ids

        with torch.no_grad():
            outputs = self.model(input_ids)
            logits = outputs.logits

            # Greedy choice over the distribution at the last position.
            next_token_logits = logits[:, -1, :]
            next_token_id = torch.argmax(next_token_logits, dim=-1)
            predicted_token = self.tokenizer.decode(next_token_id)

        return predicted_token.strip()

    def skip_inference(self, prompt, layer_mask):
        """Greedy-predict the next token while skipping masked layers.

        Args:
            prompt: input prompt text.
            layer_mask: one boolean per decoder layer; True means skip the
                layer, False means execute it. Length must equal the model's
                layer count.

        Returns:
            str: the predicted next token, whitespace-stripped.

        Raises:
            ValueError: if ``layer_mask`` length differs from the layer count.
        """
        if len(layer_mask) != self.num_layers:
            raise ValueError(f"层掩码长度({len(layer_mask)})必须等于模型层数({self.num_layers})")

        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        input_ids = inputs.input_ids
        batch_size, seq_length = input_ids.shape
        attention_mask_2d = inputs.attention_mask

        # position_ids are required to compute rotary position embeddings.
        position_ids = torch.arange(
            0, seq_length, dtype=torch.long, device=self.device
        ).unsqueeze(0)

        with torch.no_grad():
            # Token embedding layer.
            hidden_states = self.model.model.embed_tokens(input_ids)

            # Rotary position embeddings (RoPE); the call signature changed
            # across transformers versions, so try both forms.
            try:
                rotary_emb = self.model.model.rotary_emb(hidden_states, position_ids)
            except TypeError:
                # Fallback for releases using the (hidden_states, seq_len=) form.
                rotary_emb = self.model.model.rotary_emb(hidden_states, seq_len=seq_length)

            # Build the additive 4D attention mask in the form SDPA expects.
            if seq_length > 1:
                # Float mask initialized to 0.0 everywhere (= attend).
                final_attention_mask = torch.zeros(
                    (batch_size, 1, seq_length, seq_length),
                    dtype=self.model.dtype,
                    device=self.device
                )

                # Boolean causal mask (True marks future positions to hide).
                causal_mask = torch.ones(
                    (seq_length, seq_length), dtype=torch.bool, device=self.device
                ).triu(diagonal=1)

                # Fold the causal constraint into the float mask.
                final_attention_mask.masked_fill_(
                    causal_mask[None, None, :, :],
                    torch.finfo(self.model.dtype).min
                )

                # Fold the padding mask into the float mask.
                final_attention_mask.masked_fill_(
                    attention_mask_2d[:, None, None, :] == 0,
                    torch.finfo(self.model.dtype).min
                )
            else:
                final_attention_mask = None

            # Run the decoder stack, skipping masked layers.
            for i, decoder_layer in enumerate(self.model.model.layers):
                if layer_mask[i]:  # True -> skip this layer entirely
                    continue
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=final_attention_mask,
                    position_embeddings=rotary_emb
                )
                hidden_states = layer_outputs[0]

            # Final RMS norm and LM head.
            hidden_states = self.model.model.norm(hidden_states)
            logits = self.model.lm_head(hidden_states)

            # Greedy choice at the last position.
            next_token_logits = logits[:, -1, :]
            next_token_id = torch.argmax(next_token_logits, dim=-1)
            predicted_token = self.tokenizer.decode(next_token_id)

        return predicted_token.strip()

    def skip_single_layer(self, prompt, layer_to_skip):
        """Run inference with exactly one layer skipped.

        Args:
            prompt: input prompt text.
            layer_to_skip: index of the layer to skip.

        Returns:
            str: the predicted next token.

        Raises:
            ValueError: if ``layer_to_skip`` is out of range.
        """
        # Bug fix: previously an out-of-range index raised a raw IndexError,
        # and a negative index silently wrapped around and skipped the wrong
        # layer. Validate explicitly (mirrors SkipableModel).
        if not 0 <= layer_to_skip < self.num_layers:
            raise ValueError(f"要跳过的层索引({layer_to_skip})超出范围 [0, {self.num_layers-1}]")
        layer_mask = [False] * self.num_layers
        layer_mask[layer_to_skip] = True
        return self.skip_inference(prompt, layer_mask)

    def skip_consecutive_layers(self, prompt, start_layer, end_layer):
        """Run inference with a contiguous range of layers skipped.

        Args:
            prompt: input prompt text.
            start_layer: first layer index to skip (inclusive).
            end_layer: last layer index to skip (inclusive).

        Returns:
            str: the predicted next token.

        Raises:
            ValueError: if the range is out of bounds or inverted.
        """
        if start_layer < 0 or end_layer >= self.num_layers:
            raise ValueError(f"层索引超出范围: [{start_layer}, {end_layer}], 模型总层数: {self.num_layers}")
        if start_layer > end_layer:
            raise ValueError(f"起始层({start_layer})不能大于结束层({end_layer})")

        layer_mask = [False] * self.num_layers
        for i in range(start_layer, end_layer + 1):
            layer_mask[i] = True
        return self.skip_inference(prompt, layer_mask)

import torch

class SkipableModel:
    """Wrapper that runs a causal LM while optionally skipping decoder layers.

    Expects an HF-style model exposing ``model.model.embed_tokens``,
    ``model.model.layers``, ``model.model.norm`` and ``model.lm_head``
    (e.g. Llama-family architectures) — TODO confirm against the actual
    model class used by callers.
    """

    def __init__(self, model, tokenizer):
        """Store the model/tokenizer pair and cache device and layer count.

        Args:
            model: pretrained causal language model.
            tokenizer: tokenizer matching ``model``.
        """
        self.model = model
        self.tokenizer = tokenizer
        self.device = model.device
        self.num_layers = model.config.num_hidden_layers

    def full_inference(self, prompt):
        """Greedy-predict the next token using every layer of the model.

        Args:
            prompt: input prompt text.

        Returns:
            str: the predicted next token, whitespace-stripped.
        """
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        input_ids = inputs.input_ids

        with torch.no_grad():
            outputs = self.model(input_ids)
            logits = outputs.logits

            # Greedy choice over the distribution at the last position.
            next_token_logits = logits[:, -1, :]
            next_token_id = torch.argmax(next_token_logits, dim=-1)
            predicted_token = self.tokenizer.decode(next_token_id)

        return predicted_token.strip()

    def skip_inference(self, prompt, layer_mask):
        """Greedy-predict the next token while skipping masked layers.

        Args:
            prompt: input prompt text.
            layer_mask: one boolean per decoder layer; True means skip the
                layer, False means execute it. Length must equal the model's
                layer count.

        Returns:
            str: the predicted next token, whitespace-stripped.

        Raises:
            ValueError: if ``layer_mask`` length differs from the layer count.
        """
        if len(layer_mask) != self.num_layers:
            raise ValueError(f"层掩码长度({len(layer_mask)})必须等于模型层数({self.num_layers})")

        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        input_ids = inputs.input_ids
        batch_size, seq_length = input_ids.shape
        attention_mask_2d = inputs.attention_mask

        # position_ids let each decoder layer compute its own position
        # encoding internally.
        position_ids = torch.arange(
            0, seq_length, dtype=torch.long, device=self.device
        ).unsqueeze(0)

        with torch.no_grad():
            # Token embedding layer.
            hidden_states = self.model.model.embed_tokens(input_ids)

            # Build the additive 4D attention mask in the form SDPA expects.
            if seq_length > 1:
                # Float mask initialized to 0.0 everywhere (= attend).
                final_attention_mask = torch.zeros(
                    (batch_size, 1, seq_length, seq_length),
                    dtype=self.model.dtype,
                    device=self.device
                )

                # Boolean causal mask (True marks future positions to hide).
                causal_mask = torch.ones(
                    (seq_length, seq_length), dtype=torch.bool, device=self.device
                ).triu(diagonal=1)

                # Fold the causal constraint into the float mask.
                final_attention_mask.masked_fill_(
                    causal_mask[None, None, :, :],
                    torch.finfo(self.model.dtype).min
                )

                # Fold the padding mask into the float mask.
                final_attention_mask.masked_fill_(
                    attention_mask_2d[:, None, None, :] == 0,
                    torch.finfo(self.model.dtype).min
                )
            else:
                final_attention_mask = None

            # Run the decoder stack, skipping masked layers.
            for i, decoder_layer in enumerate(self.model.model.layers):
                if layer_mask[i]:  # True -> skip this layer entirely
                    continue
                # Pass position_ids and let the layer handle positional
                # encoding itself. NOTE(review): newer transformers releases
                # expect `position_embeddings` instead — verify against the
                # installed version. Bug fix: removed a leftover
                # `print(layer_outputs[1])` debug statement that dumped the
                # per-layer cache to stdout, and stopped requesting the
                # cache we never use.
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=final_attention_mask,
                    position_ids=position_ids,
                    use_cache=False
                )
                hidden_states = layer_outputs[0]

            # Final RMS norm and LM head.
            hidden_states = self.model.model.norm(hidden_states)
            logits = self.model.lm_head(hidden_states)

            # Greedy choice at the last position.
            next_token_logits = logits[:, -1, :]
            next_token_id = torch.argmax(next_token_logits, dim=-1)
            predicted_token = self.tokenizer.decode(next_token_id)

        return predicted_token.strip()

    def skip_single_layer(self, prompt, layer_to_skip):
        """Run inference with exactly one layer skipped.

        Args:
            prompt: input prompt text.
            layer_to_skip: index of the layer to skip.

        Returns:
            str: the predicted next token.

        Raises:
            ValueError: if ``layer_to_skip`` is out of range.
        """
        layer_mask = [False] * self.num_layers
        if 0 <= layer_to_skip < self.num_layers:
            layer_mask[layer_to_skip] = True
        else:
            raise ValueError(f"要跳过的层索引({layer_to_skip})超出范围 [0, {self.num_layers-1}]")
        return self.skip_inference(prompt, layer_mask)

    def skip_consecutive_layers(self, prompt, start_layer, end_layer):
        """Run inference with a contiguous range of layers skipped.

        Args:
            prompt: input prompt text.
            start_layer: first layer index to skip (inclusive).
            end_layer: last layer index to skip (inclusive).

        Returns:
            str: the predicted next token.

        Raises:
            ValueError: if the range is out of bounds or inverted.
        """
        if not (0 <= start_layer < self.num_layers and 0 <= end_layer < self.num_layers):
            raise ValueError(f"层索引超出范围: [{start_layer}, {end_layer}], 模型总层数: {self.num_layers}")
        if start_layer > end_layer:
            raise ValueError(f"起始层({start_layer})不能大于结束层({end_layer})")

        layer_mask = [False] * self.num_layers
        for i in range(start_layer, end_layer + 1):
            layer_mask[i] = True
        return self.skip_inference(prompt, layer_mask)

