#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Qwen3-0.6B 使用示例
"""

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import os

def load_qwen3_model(model_path):
    """Load the Qwen3-0.6B model and its tokenizer from a local directory.

    Args:
        model_path: Path to the pretrained checkpoint directory.

    Returns:
        A ``(model, tokenizer)`` tuple ready for text generation.
    """
    print(f"正在加载模型: {model_path}")

    use_cuda = torch.cuda.is_available()

    # Left padding so batched generation aligns prompts at the right edge.
    tokenizer = AutoTokenizer.from_pretrained(
        model_path,
        trust_remote_code=True,
        padding_side="left",
    )

    # Some checkpoints ship without a pad token; fall back to EOS.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # fp16 on GPU, fp32 on CPU; device_map="auto" only makes sense with CUDA.
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16 if use_cuda else torch.float32,
        device_map="auto" if use_cuda else None,
        trust_remote_code=True,
    )

    print(f"模型加载完成，参数量: {model.num_parameters():,}")
    print(f"使用设备: {next(model.parameters()).device}")

    return model, tokenizer

def generate_text(model, tokenizer, prompt, max_length=200, temperature=0.7, top_p=0.9):
    """Generate a continuation of *prompt* and return only the new text.

    Args:
        model: Causal LM exposing ``generate`` and ``parameters``.
        tokenizer: Tokenizer matching the model.
        prompt: Text to continue.
        max_length: Upper bound on the total sequence length
            (prompt tokens + generated tokens).
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The generated continuation, stripped of surrounding whitespace.
    """
    # Encode the prompt and move it to the model's device.
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    device = next(model.parameters()).device
    inputs = inputs.to(device)

    # Explicit attention mask: with pad_token_id == eos_token_id the model
    # cannot infer the mask reliably, and transformers warns about it.
    attention_mask = torch.ones_like(inputs)

    # Guarantee at least one new token even when the prompt alone already
    # reaches max_length; otherwise generate() has no room to produce output.
    input_len = inputs.shape[-1]
    max_length = max(max_length, input_len + 1)

    with torch.no_grad():
        outputs = model.generate(
            inputs,
            attention_mask=attention_mask,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
            repetition_penalty=1.1
        )

    # Strip the prompt at the *token* level. The old approach decoded the
    # full sequence and cut len(prompt) characters, which breaks whenever
    # the tokenizer does not round-trip the prompt text exactly.
    new_tokens = outputs[0][input_len:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

def chat_with_qwen3(model, tokenizer):
    """Run an interactive chat loop on stdin/stdout with the loaded model."""
    print("\n=== Qwen3-0.6B 对话模式 ===")
    print("输入 'quit' 或 'exit' 退出对话")
    print("输入 'clear' 清空对话历史")
    print("-" * 50)

    history = ""

    while True:
        user_input = input("\n用户: ").strip()
        lowered = user_input.lower()

        # Guard clauses for the control commands and blank input.
        if lowered in ('quit', 'exit', '退出'):
            print("再见！")
            break
        if lowered in ('clear', '清空'):
            history = ""
            print("对话历史已清空")
            continue
        if not user_input:
            continue

        # Prepend the running transcript when one exists.
        prefix = f"{history}\n" if history else ""
        prompt = f"{prefix}用户: {user_input}\n助手: "

        try:
            reply = generate_text(
                model, tokenizer, prompt,
                max_length=len(tokenizer.encode(prompt)) + 150,
                temperature=0.7,
                top_p=0.9,
            )

            # Keep only the first line; fall back to a canned apology.
            reply = reply.split('\n')[0].strip() or "抱歉，我没有理解您的问题。"
            print(f"助手: {reply}")

            history = f"{prompt}{reply}"

            # Bound the rolling transcript so prompts stay short; keeps the
            # last 10 *lines* (roughly the most recent turns).
            if len(history) > 1000:
                history = '\n'.join(history.split('\n')[-10:])

        except Exception as e:
            print(f"生成回复时出错: {e}")

def test_examples(model, tokenizer):
    """Run a fixed set of demo prompts through the model and print answers."""
    print("\n=== 测试示例 ===")

    sample_prompts = (
        "请介绍一下人工智能的发展历史",
        "什么是深度学习？",
        "请写一首关于春天的诗",
        "解释一下什么是机器学习",
        "Python有哪些优点？",
    )

    for idx, prompt in enumerate(sample_prompts, start=1):
        print(f"\n示例 {idx}: {prompt}")
        print("-" * 30)

        try:
            answer = generate_text(
                model, tokenizer, prompt,
                max_length=300,
                temperature=0.7,
                top_p=0.9,
            )
            print(f"回答: {answer}")
        except Exception as e:
            print(f"生成失败: {e}")

        print()

def main():
    """Entry point: load the model and dispatch to the selected demo mode."""
    # Hard-coded checkpoint location for this environment.
    model_path = "/home/zkh/lzt/damoxing/boshuyuce/xindaoyuce/Qwen3-0.6B"

    # Bail out early with a clear message if the checkpoint is missing.
    if not os.path.exists(model_path):
        print(f"错误: 模型路径不存在: {model_path}")
        return

    try:
        model, tokenizer = load_qwen3_model(model_path)

        print("\n选择运行模式:")
        print("1. 测试示例")
        print("2. 对话模式")
        print("3. 自定义文本生成")

        choice = input("\n请选择 (1/2/3): ").strip()

        if choice == "3":
            # Free-form completion of a user-supplied opening.
            prompt = input("请输入要生成的文本开头: ").strip()
            if prompt:
                print("\n生成中...")
                result = generate_text(model, tokenizer, prompt, max_length=400)
                print(f"\n生成结果:\n{prompt}{result}")
        elif choice == "2":
            chat_with_qwen3(model, tokenizer)
        elif choice == "1":
            test_examples(model, tokenizer)
        else:
            print("无效选择")

    except Exception as e:
        print(f"运行出错: {e}")
        import traceback
        traceback.print_exc()

# Run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main() 