from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
import torch
import numpy as np

"""
# 设置Numpy输出选项
np.set_printoptions(threshold = np.inf,    # 禁用元素数量限制
                    #linewidth = 200,      # 每行显示更多元素
                    suppress = True)       # 禁用科学计数法‌
"""
# 设置PyTorch打印选项
# torch.set_printoptions(profile="full")     # 关闭张量折叠显示‌


# 检查CUDA是否可用
print("===================CUDA information===================")
print(f"torch.__version__: {torch.__version__}")
print(f"torch.cuda.is_available(): {torch.cuda.is_available()}")
print(f"torch.version.cuda: {torch.version.cuda}")
print(f"torch.backends.cudnn.version(): {torch.backends.cudnn.version()}")

# 检查可用的GPU设备
print("===================GPU information===================")
if torch.cuda.is_available():
    n_gpus = torch.cuda.device_count()
    print(f"Available GPUs: {n_gpus}")
    for i in range(n_gpus):
        print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
else:
    print("No GPUs available. Using CPU.")


#model_dir = "C:\\Users\\zdc\\.cache\\modelscope\\hub\\models\\deepseek-ai\\DeepSeek-R1-Distill-Qwen-1.5B"
#model_dir = "C:\\Users\\zdc\\.cache\\modelscope\\hub\\models\\deepseek-ai\\DeepSeek-R1-Distill-Qwen-7B"
model_dir = "D:\\github\\py1\\DeepSeek-R1-Distill-Qwen-1.5B"
#model_dir = "D:\\github\\py1\\DeepSeek-R1-Distill-Qwen-7B"

# 加载分词器
print("===================Token information===================")
tokenizer = AutoTokenizer.from_pretrained(model_dir)
print(f"EOS Token ID: {tokenizer.eos_token_id}, EOS Token: {tokenizer.decode(tokenizer.eos_token_id)}")
print(f"Token ID: 151646, Token: {tokenizer.decode(151646)}")
print(f"Token ID: 100007, Token: {tokenizer.decode(100007)}")
print(f"Token ID: 101128, Token: {tokenizer.decode(101128)}")
print(f"Token ID: 104949, Token: {tokenizer.decode(104949)}")
print(f"Token ID: 101494, Token: {tokenizer.decode(101494)}")
print(f"Token ID: 119461, Token: {tokenizer.decode(119461)}")
print(f"Token ID: 11319, Token: {tokenizer.decode(11319)}")
print(f"Token ID: 1, Token: {tokenizer.decode(1)}")

# 自定义流式处理器
class MyStreamer(TextStreamer):
    """Streams generated text to stdout character-by-character.

    The prompt itself is never echoed (skip_prompt is forced on); each
    finalized chunk is flushed immediately so output appears as it is
    generated, and two blank lines are printed once the stream ends.
    """

    def __init__(self, tokenizer, **kwargs):
        # Force skip_prompt=True; forward any other options unchanged.
        super().__init__(tokenizer, skip_prompt = True, **kwargs)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        # Emit the chunk without a trailing newline and flush right away.
        print(text, end = "", flush = True)
        if not stream_end:
            return
        # Separate the finished answer from whatever is printed next.
        print("\n\n")
# Create the streaming handler used by model.generate below
streamer = MyStreamer(tokenizer)

max_memory = {0: "20GiB"}       # per-device memory cap (only used if passed to from_pretrained)
device_map = "auto" # automatic GPU/CPU placement; also accepts a device name (e.g. cpu, cuda:0) or 'auto', 'balanced', 'balanced_low_0', 'sequential'.
# Load the model
print("===================Model loading information===================")
model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    torch_dtype = torch.float16,    # use FP16 precision
#    torch_dtype = torch.int8,
#    offload_folder = "offload",    # directory for CPU offload
#    offload_state_dict = True,     # offload model parameters to CPU
#    max_memory = max_memory,
    output_hidden_states = True,   # needed so forward passes return hidden states
    device_map = device_map
)

print("===================Model information===================")
"""
print(f"model.named_parameters()")
for name, _ in model.named_parameters():  
    print(f"name: {name}")
"""
print(f"model.config.architectures: {model.config.architectures}")
print(f"model.hf_device_map: {model.hf_device_map}")
print(f"model.device: {model.device}")

def generate(user_input):
    """Tokenize *user_input*, print its last-layer hidden-state embedding,
    stream a generated answer to stdout, and return the decoded answer.

    Fix: in the original, the decode step was stranded at module level
    inside a dead triple-quoted string (indentation mistake) and the
    generation output was discarded. The decoded text is now returned —
    backward-compatible, since existing callers simply ignore it.
    """
    print()
    print(f"你输入的内容是：{user_input}")
    # Tokenize and move the tensors to the model's device.
    inputs = tokenizer(user_input, return_tensors = "pt").to(model.device)
    print(inputs)

    # Vector representation of the input: a plain forward pass, taking the
    # last layer of hidden states (requires output_hidden_states=True at load).
    with torch.no_grad():
        outputs = model(**inputs)
        hidden_states = outputs.hidden_states[-1]  # last-layer hidden states
    print("\n输入文本的向量化表示：")
    # (move to CPU first — hidden_states.cpu() — if GPU memory pressure matters)
    print(hidden_states)

    # Generate the answer; tokens are streamed to stdout by `streamer`.
    print()
    print("模型生成的回答是：")
    print()
    outputs = model.generate(
        **inputs,
        eos_token_id = tokenizer.eos_token_id,
        pad_token_id = tokenizer.eos_token_id,  # reuse EOS as pad to silence the missing-pad warning
        streamer = streamer,
        max_length = 300,           # max TOTAL length (prompt + generated tokens)
        num_beams = 1,              # beam count; values > 1 pair with early_stopping=True
        early_stopping = False,     # stop when all beams emit EOS (irrelevant with 1 beam)
        repetition_penalty = 1.5,   # penalty factor discouraging repeated tokens
        temperature = 0.7,          # sampling temperature
        top_p = 0.9,                # nucleus (top-p) sampling
        do_sample = True            # enable sampling
    )

    # Decode and return the full generated sequence (special tokens stripped).
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens = True)
    return generated_text

generate("如何理解模型蒸馏？")

while True:
    user_input = input("请输入内容（输入 'q' 退出）：")
    if user_input == 'q':
        break
    generate(user_input)
 
