import torch
from model3 import SamOut
from tqdm import tqdm
from hex_read_sort import TokenSystem
import json

# Load the vocabulary / tokenizer state from disk.
token_system = TokenSystem()
token_system.load("token_system")

# Model configuration: hidden size scales with depth (64 units per layer)
# and the attention-head count matches the layer count.
num_layers = 8
hidden_size = 2 ** 6 * num_layers
num_heads = num_layers

# Build the model and restore fine-tuned (SFT) weights.
# NOTE(review): voc_size=12507 must cover the special ids used below
# (BOS=1, role markers 12501/12502, EOS=2) — confirm against the tokenizer.
model = SamOut(
    voc_size=12507, 
    hidden_size=hidden_size, 
    num_heads=num_heads, 
    num_layers=num_layers
)
model.load_state_dict(torch.load("model_pretrain_cap_1_sft_new.pth"))
# Inference-only: eval mode, CUDA required (no CPU fallback here).
model.eval().to("cuda")
print("模型加载完成")

# 改进的采样函数：加入重复抑制机制
def sample_with_repetition_suppression(logits: torch.Tensor, 
                                      generated_ids: list,
                                      temperature: float = 1.0,
                                      top_k: int = None, 
                                      top_p: float = None,
                                      repetition_penalty: float = 1.2) -> int:
    """Sample the next token id from ``logits`` with repetition suppression.

    Parameters:
    - logits: 1-D tensor of raw (unnormalized) scores over the vocabulary.
      The tensor is NOT modified; penalties are applied to an internal copy.
    - generated_ids: token ids produced so far (only the most recent 50 are
      penalized).
    - temperature: softmax temperature; values below 1e-6 trigger greedy
      argmax decoding.
    - top_k: if set and > 0, keep only the k most probable tokens.
    - top_p: if set in (0, 1), nucleus sampling — keep the smallest prefix
      of tokens whose cumulative probability exceeds top_p.
    - repetition_penalty: CTRL-style penalty (> 1.0 enables it): positive
      logits are divided by it, negative logits multiplied, so recently
      seen tokens always become less likely.

    Returns the sampled token id as a Python int.
    """
    # Bug fix: work on a copy. The caller passes a view into the model
    # output (output[0, -1, :]); mutating it in place would silently
    # corrupt the caller's tensor.
    logits = logits.clone()

    # Apply the repetition penalty to recently generated tokens.
    if generated_ids and repetition_penalty > 1.0:
        # Only the last 50 tokens are considered for suppression.
        recent_ids = set(generated_ids[-min(50, len(generated_ids)):])

        for token_id in recent_ids:
            if token_id < logits.size(0):  # skip ids outside the vocab range
                if logits[token_id] > 0:
                    logits[token_id] /= repetition_penalty
                else:
                    logits[token_id] *= repetition_penalty

    # Near-zero temperature means greedy decoding: take the best token
    # directly (scaling by a positive constant cannot change the argmax).
    if temperature < 1e-6:
        return logits.argmax().item()

    # Temperature scaling (temperature >= 1e-6 here, so this equals
    # the original `logits / max(temperature, 1e-6)`).
    logits = logits / temperature

    # Sanitize numerical anomalies before softmax.
    if torch.isnan(logits).any() or torch.isinf(logits).any():
        logits = torch.nan_to_num(logits, nan=0.0, posinf=1e8, neginf=-1e8)

    # Convert to a probability distribution.
    probs = torch.softmax(logits, dim=-1)

    # Top-k filtering: zero out everything outside the k best tokens.
    if top_k is not None and top_k > 0:
        top_k = min(top_k, probs.size(-1))
        topk_probs, topk_indices = torch.topk(probs, top_k)
        filtered_probs = torch.zeros_like(probs)
        filtered_probs.scatter_(-1, topk_indices, topk_probs)
        probs = filtered_probs

    # Top-p (nucleus) filtering: keep the smallest high-probability prefix.
    if top_p is not None and 0 < top_p < 1.0:
        sorted_probs, sorted_indices = torch.sort(probs, descending=True)
        cumulative_probs = torch.cumsum(sorted_probs, dim=-1)
        remove_mask = cumulative_probs > top_p
        # Shift right so the token that crosses the threshold is kept.
        remove_mask[..., 1:] = remove_mask[..., :-1].clone()
        remove_mask[..., 0] = False
        sorted_probs[remove_mask] = 0.0
        filtered_probs = torch.zeros_like(probs)
        filtered_probs.scatter_(-1, sorted_indices, sorted_probs)
        probs = filtered_probs

    # Renormalize; fall back to uniform if filtering removed everything.
    probs_sum = probs.sum()
    if probs_sum <= 0:
        probs = torch.ones_like(probs) / probs.numel()
    else:
        probs = probs / probs_sum

    # Tiny uniform floor so no token has exactly zero probability
    # (1e-10 spread across the vocabulary — negligible but nonzero).
    probs = probs + (1e-10 / probs.numel())
    probs = probs / probs.sum()

    # Sample on CPU (multinomial on the CUDA tensor is unnecessary here).
    probs_cpu = probs.cpu()
    next_token = torch.multinomial(probs_cpu, num_samples=1).item()

    return next_token

# Autoregressive text generation with repetition suppression.
def generate_text(
    prompt: str,
    max_length: int = 200,
    temperature: float = 0.8,
    top_k: int = 50,
    top_p: float = 0.9,
    repetition_penalty: float = 1.2,
    eos_token: int = 2,
    debug: bool = False
) -> str:
    """Generate a continuation for ``prompt``, token by token.

    The prompt is wrapped as [BOS=1, 12501, <prompt ids>, 12502] and the
    model is re-run on the growing sequence each step (no KV cache).
    Generation stops at ``eos_token`` or after ``max_length`` steps.

    - repetition_penalty: 1.0 disables suppression; > 1.0 penalizes
      recently generated tokens (prompt tokens are never penalized).

    Returns the decoded response text (prompt excluded).
    """
    encoded_prompt = token_system.encode(prompt)
    context_ids = [1, 12501] + encoded_prompt + [12502]
    prompt_len = len(context_ids)

    # Running sequence of ids (prompt + everything generated so far).
    sequence = context_ids.copy()
    context_tensor = torch.tensor([context_ids], dtype=torch.long).to("cuda")

    with torch.no_grad():
        for step in tqdm(range(max_length), desc="生成中"):
            model_out, _ = model(context_tensor)
            last_logits = model_out[0, -1, :]

            # Only the generated tail is eligible for repetition penalties.
            next_token_id = sample_with_repetition_suppression(
                last_logits,
                generated_ids=sequence[prompt_len:],
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                repetition_penalty=repetition_penalty
            )

            # Stop as soon as the end-of-sequence token is produced.
            if next_token_id == eos_token:
                break

            # Extend both the id list and the model input tensor.
            sequence.append(next_token_id)
            new_column = torch.tensor([[next_token_id]], device="cuda")
            context_tensor = torch.cat([context_tensor, new_column], dim=1)

            if debug:
                token_str = token_system.decode([next_token_id])
                print(f"步数 {step}: {next_token_id} -> {token_str}")

    # Decode only the generated tail, never the prompt.
    return token_system.decode(sequence[prompt_len:])

# Batched driver around generate_text (sequential under the hood).
def batch_generate_texts(
    prompts: list,
    max_length: int = 200,
    temperature: float = 0.8,
    top_k: int = 50,
    top_p: float = 0.9,
    repetition_penalty: float = 1.2,
    batch_size: int = 4
) -> list:
    """Generate a response for each prompt, chunked into batches.

    Prompts are processed one at a time (generate_text is per-prompt);
    batching only controls the progress-bar granularity and how often
    results are flushed into the output list. The CUDA cache is cleared
    after every prompt to keep memory pressure down.

    Returns the generated texts in the same order as ``prompts``.
    """
    outputs = []

    for start in tqdm(range(0, len(prompts), batch_size), desc="批量生成"):
        chunk = prompts[start:start + batch_size]

        for prompt in chunk:
            text = generate_text(
                prompt=prompt,
                max_length=max_length,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                eos_token=2,
                debug=False
            )
            outputs.append(text)
            # Release cached GPU memory between prompts.
            torch.cuda.empty_cache()

    return outputs

# Entry point: sweep repetition-penalty settings, then run a batch test.
if __name__ == "__main__":
    # System instruction prepended to every test question.
    system_prompt = "你是一位人工智能助手，请用中文回答以下问题：\n"

    questions = [
        "自然语言处理是什么？",
        "如何训练一个大型语言模型？",
        "用简单的话解释量子计算",
        "写一首关于春天的诗",
        "写一篇科幻短篇故事",
    ]
    test_prompts = [system_prompt + q for q in questions]

    # Compare output quality across several repetition-penalty values.
    for penalty in (1.0, 1.2, 1.5):
        print(f"\n===== 重复抑制系数: {penalty} =====")

        for idx, prompt in enumerate(test_prompts, start=1):
            print(f"\n[{idx}] 提示: {prompt}")

            generated_text = generate_text(
                prompt=prompt,
                temperature=0.9,
                top_k=10,
                repetition_penalty=penalty,
                max_length=180
            )

            # Re-encode the output to estimate how repetitive it is.
            tokens = token_system.encode(generated_text)
            unique_tokens = len(set(tokens))
            repetition_rate = 1 - unique_tokens / len(tokens) if tokens else 0

            print(f"生成结果: {generated_text}")
            print(f"长度: {len(tokens)} token, 重复率: {repetition_rate:.2%}")
            print("-" * 80)

    # Batch-mode smoke test over a doubled prompt list.
    print("\n===== 批量生成测试 =====")
    batch_results = batch_generate_texts(
        test_prompts * 2,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        repetition_penalty=1.3,
        max_length=180,
        batch_size=3
    )

    for idx, result in enumerate(batch_results, start=1):
        print(f"\n结果 {idx}:")
        print(result)
        print("-" * 60)