import torch
import torch.nn.utils.prune as prune
import gc
from transformers import AutoModelForCausalLM, AutoTokenizer

# Set up and load the Qwen/Qwen2.5-0.5B model
def prune_model():
    """Load Qwen2.5-0.5B, report its parameter counts, and run a baseline
    generation pass prior to pruning (pruning itself follows below).

    Side effects: downloads the model/tokenizer from the Hugging Face hub
    and prints progress information to stdout.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"======Using {device} device======")
    print("======Loading model======")
    # The canonical hub id is "Qwen/Qwen2.5-0.5B" (no hyphen after "Qwen");
    # the previous "Qwen/Qwen-2.5-0.5B" id does not resolve on the hub.
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B", trust_remote_code=True, torch_dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", trust_remote_code=True)
    # Move the model to the selected device: the inputs below are moved to
    # the same device, and generate() fails on a device mismatch otherwise.
    model.to(device)

    # Helper: count total and non-zero trainable parameters so the effect
    # of pruning (weights zeroed out) can be quantified before/after.
    def count_nonzero_params(model):
        total_params = 0
        total_nonzero_params = 0
        for name, param in model.named_parameters():
            if param.requires_grad:
                total_params += param.numel()
                total_nonzero_params += torch.sum(param != 0).item()
        return total_params, total_nonzero_params

    # 1. Analyze model parameters before pruning.
    print("======Before pruning======")
    total_params, total_nonzero_params = count_nonzero_params(model)
    print(f"Total parameters: {total_params}")
    print(f"Total nonzero parameters: {total_nonzero_params}")

    prompt = "你好，请介绍下你自己，并解析一下什么是AI"
    input_ids = tokenizer(prompt, return_tensors="pt").to(device)

    # 2. Baseline generation before pruning (sampled, hence non-deterministic).
    output_before = model.generate(**input_ids, max_new_tokens=100, do_sample=True)
    text_before = tokenizer.decode(output_before[0], skip_special_tokens=True)

    # 3. Pruning
    
