#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Qwen3-0.6B 深度推理模式演示
基于您提供的例子，实现带有思考过程的对话功能
"""

import os
import torch
import logging
from pathlib import Path
from typing import Optional, Tuple, List, Dict
from modelscope import snapshot_download, AutoModelForCausalLM, AutoTokenizer
from modelscope.utils.constant import DownloadMode

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def find_existing_model(model_id: str, cache_dir: str, revision: str = 'master') -> Optional[str]:
    """Locate an already-downloaded model inside *cache_dir*.

    Several known cache layouts are probed first (including ModelScope's
    layout, where dots in the model name are stored as ``___``), then a
    recursive scan accepts any directory that contains both a config.json
    and a weight file.

    Args:
        model_id: Hub model id, e.g. ``"Qwen/Qwen3-0.6B"``.
        cache_dir: Root directory of the local model cache.
        revision: Revision component used by some cache layouts.

    Returns:
        Path of the model directory, or ``None`` when nothing usable is
        found (including on unexpected errors, which are logged).
    """
    # A directory only counts as a model if it has weights, not just a config.
    weight_files = ('model.safetensors', 'pytorch_model.bin', 'model.bin')
    try:
        logger.info(f"正在查找模型 {model_id}...")

        possible_paths = []

        # 1. ModelScope layout: cache_dir/<org>/<name with '.' -> '___'>
        #    e.g. "Qwen/Qwen3-0.6B" is stored as cache_dir/Qwen/Qwen3-0___6B
        #    (generalized from the previously hard-coded Qwen3-0___6B folder).
        if '/' in model_id:
            org, name = model_id.split('/', 1)
            possible_paths.append(os.path.join(cache_dir, org, name.replace('.', '___')))

        # 2. Other common cache layouts.
        possible_paths.extend([
            os.path.join(cache_dir, model_id.replace('/', '-'), revision),
            os.path.join(cache_dir, model_id),
            os.path.join(cache_dir, 'hub', model_id.replace('/', '--'), revision),
            os.path.join(cache_dir, model_id.replace('/', '--')),
        ])

        # 3. Recursive fallback: any directory holding a config plus weights.
        if os.path.exists(cache_dir):
            for root, dirs, files in os.walk(cache_dir):
                if 'config.json' in files and any(f in files for f in weight_files):
                    possible_paths.append(root)

        # Deduplicate while preserving priority order.
        unique_paths = list(dict.fromkeys(possible_paths))

        logger.info(f"检查 {len(unique_paths)} 个可能的路径:")
        for i, path in enumerate(unique_paths, 1):
            logger.info(f"  {i}. {path}")
            # Require both config and at least one weight file, consistent
            # with the os.walk scan above, so a half-downloaded directory
            # is never accepted.
            has_config = os.path.exists(os.path.join(path, 'config.json'))
            has_weights = any(os.path.exists(os.path.join(path, f)) for f in weight_files)
            if has_config and has_weights:
                logger.info(f"✅ 找到已存在的模型: {path}")
                return path
            else:
                logger.debug(f"❌ 路径不存在: {path}")

        logger.warning(f"未找到模型 {model_id}，将尝试下载")
        return None
    except Exception as e:
        logger.warning(f"查找模型时出错: {e}")
        return None


def download_qwen_model(model_id: str, cache_dir: str, revision: str = 'master') -> str:
    """Return a local directory for *model_id*, downloading it if needed.

    A copy already present in *cache_dir* is reused; otherwise the model
    is fetched via ModelScope's ``snapshot_download``.

    Raises:
        Exception: re-raised from ``snapshot_download`` when the download fails.
    """
    logger.info(f"检查模型 {model_id} 是否已下载...")

    # Prefer a cached copy when one can be located.
    local_dir = find_existing_model(model_id, cache_dir, revision)
    if local_dir:
        logger.info("模型已存在，跳过下载")
        return local_dir

    # No local copy available: fetch from the hub.
    try:
        logger.info(f"开始下载模型至 {cache_dir}...")
        downloaded_dir = snapshot_download(
            model_id=model_id,
            cache_dir=cache_dir,
            revision=revision
        )
    except Exception as e:
        logger.error(f"下载模型失败: {e}")
        raise
    logger.info("模型下载完成！")
    return downloaded_dir


def load_qwen_model(model_dir: str) -> Tuple[AutoModelForCausalLM, AutoTokenizer]:
    """Load the Qwen model and tokenizer from a local directory.

    ``TRANSFORMERS_OFFLINE`` is forced to ``'1'`` while loading (works
    around a torchvision compatibility issue) and is always restored
    afterwards — removed again if it was not set before — even when
    loading fails.

    Args:
        model_dir: Directory containing config.json and the model weights.

    Returns:
        ``(model, tokenizer)`` with the model switched to eval mode.

    Raises:
        Exception: re-raised from the underlying ``from_pretrained`` calls.
    """
    logger.info(f"加载模型 {model_dir}...")

    # Remember the previous value (None when unset) so it can be restored
    # exactly; the old code defaulted to '0', which left a stray value in
    # the environment when the variable had not been set at all, and it
    # duplicated the restore logic in both success and error paths.
    original_env = os.environ.get('TRANSFORMERS_OFFLINE')
    os.environ['TRANSFORMERS_OFFLINE'] = '1'

    try:
        # Tokenizer first; right padding matches the generation setup.
        tokenizer = AutoTokenizer.from_pretrained(
            model_dir,
            trust_remote_code=True,
            padding_side='right'
        )

        # Some checkpoints ship without a pad token; fall back to EOS.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        # bfloat16 on GPU, float32 on CPU (CPU bfloat16 support is spotty).
        device = "cuda" if torch.cuda.is_available() else "cpu"
        torch_dtype = torch.bfloat16 if device == "cuda" else torch.float32

        logger.info(f"使用设备: {device}, 数据类型: {torch_dtype}")

        model = AutoModelForCausalLM.from_pretrained(
            model_dir,
            trust_remote_code=True,
            device_map='auto' if device == "cuda" else None,  # auto-shard only on GPU
            torch_dtype=torch_dtype,
            low_cpu_mem_usage=True,
            use_safetensors=True,
            attn_implementation="eager"  # avoid a flash-attn requirement
        )

        # device_map is None on CPU, so move the weights explicitly.
        if device == "cpu":
            model = model.to(device)

        model.eval()
        logger.info("模型加载完成！")

        return model, tokenizer

    except Exception as e:
        logger.error(f"加载模型失败: {e}")
        raise
    finally:
        # Restore the environment exactly as it was before loading.
        if original_env is None:
            os.environ.pop('TRANSFORMERS_OFFLINE', None)
        else:
            os.environ['TRANSFORMERS_OFFLINE'] = original_env


def parse_thinking_content(tokenizer, output_ids: List[int],
                           end_think_token_id: int = 151668) -> Tuple[str, str]:
    """Split generated token ids into thinking content and the final answer.

    Qwen3 wraps its chain-of-thought in ``<think>...</think>``; the id of the
    closing ``</think>`` token is 151668 for the Qwen3 tokenizer. It is now
    exposed as a parameter (default unchanged) so other vocabularies can be
    supported without editing this function.

    Args:
        tokenizer: Tokenizer providing ``decode``.
        output_ids: Newly generated token ids (prompt tokens excluded).
        end_think_token_id: Token id of the ``</think>`` marker.

    Returns:
        ``(thinking_content, content)``; ``thinking_content`` is ``""`` when
        no marker is present.
    """
    try:
        # Search from the end: the marker closes the last thinking section.
        index = len(output_ids) - output_ids[::-1].index(end_think_token_id)
    except ValueError:
        # No </think> marker found: the whole output is the final answer.
        index = 0

    thinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip("\n")
    content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")

    return thinking_content, content


def generate_with_thinking(model: AutoModelForCausalLM, tokenizer: AutoTokenizer, 
                          prompt: str, enable_thinking: bool = True) -> Tuple[str, str]:
    """Generate a reply for *prompt*, optionally with a visible thinking phase.

    Args:
        model: Causal LM used for generation.
        tokenizer: Matching tokenizer (provides the chat template).
        prompt: Single user message to answer.
        enable_thinking: Toggle Qwen3's deep-reasoning mode in the template.

    Returns:
        ``(thinking_content, content)``: the chain-of-thought text (possibly
        empty) and the final answer.
    """
    # Build a single-turn conversation and render it through the chat
    # template with the generation prompt appended.
    chat = [{"role": "user", "content": prompt}]
    rendered = tokenizer.apply_chat_template(
        chat,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=enable_thinking  # toggles the deep-reasoning mode
    )

    # Tokenize and move the tensors onto the model's device.
    encoded = tokenizer([rendered], return_tensors="pt").to(model.device)

    # Sample a continuation; gradients are unnecessary for inference.
    with torch.no_grad():
        sequences = model.generate(
            **encoded,
            max_new_tokens=32768,  # upper bound on generated tokens
            temperature=0.7,
            do_sample=True,
            top_p=0.9,
            top_k=50,
            repetition_penalty=1.1,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id
        )

    # Keep only the newly generated ids, dropping the prompt tokens.
    prompt_length = len(encoded.input_ids[0])
    new_token_ids = sequences[0][prompt_length:].tolist()

    # Separate the <think> section from the final answer.
    return parse_thinking_content(tokenizer, new_token_ids)


def chat_with_thinking(model: AutoModelForCausalLM, tokenizer: AutoTokenizer, 
                      max_conversation_length: int = 10) -> None:
    """Interactive chat loop that can display the model's thinking process.

    NOTE(review): the conversation history collected here is never passed to
    generate_with_thinking, so each turn is answered without prior context —
    confirm whether multi-turn context was intended.

    Args:
        model: Causal LM used for generation.
        tokenizer: Matching tokenizer.
        max_conversation_length: Number of user/assistant turn pairs kept.
    """
    history: List[Dict[str, str]] = []
    logger.info("开始聊天（输入'退出'、'quit'或'exit'结束对话）")
    logger.info("输入'思考'或'thinking'切换思考模式显示")

    show_thinking = True  # thinking output is on by default

    quit_words = {"退出", "quit", "exit", "q"}
    toggle_words = {"思考", "thinking", "思考模式"}

    while True:
        try:
            user_input = input("\n你：").strip()
            lowered = user_input.lower()

            # Commands: quit or toggle the thinking display.
            if lowered in quit_words:
                print("Qwen：再见！")
                break

            if lowered in toggle_words:
                show_thinking = not show_thinking
                mode = "开启" if show_thinking else "关闭"
                print(f"Qwen：思考模式已{mode}")
                continue

            if not user_input:
                print("Qwen：请输入内容哦～")
                continue

            # Cap the retained history (one pair = two entries) to bound memory.
            limit = max_conversation_length * 2
            if len(history) >= limit:
                history = history[-limit:]
                logger.info("对话历史已截断，保持最新对话内容")

            history.append({"role": "user", "content": user_input})

            print("Qwen：正在思考...")

            thinking_content, answer = generate_with_thinking(
                model, tokenizer, user_input, enable_thinking=show_thinking
            )

            # Show the chain-of-thought only when enabled and non-empty.
            if show_thinking and thinking_content:
                print(f"\n🤔 思考过程：")
                print(f"{thinking_content}")
                print(f"\n💬 回答：")

            print(f"Qwen：{answer}")

            history.append({"role": "assistant", "content": answer})

        except KeyboardInterrupt:
            # Ctrl-C anywhere in the turn (including mid-generation) exits.
            print("\n\nQwen：检测到中断，再见！")
            break
        except Exception as e:
            logger.error(f"生成回复时出错: {e}")
            print("Qwen：抱歉，我遇到了一些问题，请重试。")


def demo_single_query(model: AutoModelForCausalLM, tokenizer: AutoTokenizer, 
                     prompt: str = "你好，请介绍一下自己") -> None:
    """Run one query and print the thinking process plus the final answer."""
    divider = "=" * 50

    print(f"用户问题：{prompt}")
    print(divider)

    thinking_content, answer = generate_with_thinking(model, tokenizer, prompt)

    # The thinking section is shown only when the model produced one.
    if thinking_content:
        print("🤔 思考过程：")
        print(thinking_content)
        print("\n" + divider)

    print("💬 最终回答：")
    print(answer)


if __name__ == "__main__":
    MODEL_ID = "Qwen/Qwen3-0.6B"
    CACHE_DIR = "./modelscope_cache"
    REVISION = "master"

    try:
        logger.info("=== Qwen3-0.6B 深度推理模式演示 ===")
        
        # 检查并修复依赖问题
        logger.info("正在检查依赖包...")
        
        # 尝试修复torchvision兼容性问题
        try:
            os.environ['TRANSFORMERS_OFFLINE'] = '1'
            logger.info("已设置离线模式避免torchvision兼容性问题")
        except Exception as e:
            logger.warning(f"设置环境变量失败: {e}")
        
        # 下载或加载模型
        model_dir = download_qwen_model(MODEL_ID, CACHE_DIR, REVISION)
        model, tokenizer = load_qwen_model(model_dir)
        
        # 演示单次查询
        print("\n" + "=" * 60)
        print("演示单次查询的思考过程：")
        print("=" * 60)
        demo_single_query(model, tokenizer, "请解释一下什么是人工智能？")
        
        # 交互式聊天
        print("\n" + "=" * 60)
        print("开始交互式聊天：")
        print("=" * 60)
        chat_with_thinking(model, tokenizer)
        
    except Exception as e:
        logger.error(f"程序运行出错: {e}")
        print(f"错误: {e}")
        print("\n🔧 解决方案:")
        print("1. 重新安装torch和torchvision:")
        print("   pip uninstall torch torchvision")
        print("   pip install torch torchvision --index-url https://download.pytorch.org/whl/cpu")
        print("\n2. 或者安装兼容版本:")
        print("   pip install torch==2.0.1 torchvision==0.15.2")
        print("\n3. 如果仍有问题，尝试:")
        print("   pip install --upgrade transformers modelscope")
    finally:
        logger.info("程序结束")
