import os
import torch
import logging
from pathlib import Path
from typing import Optional, Tuple
from modelscope import snapshot_download, AutoModelForCausalLM, AutoTokenizer
from modelscope.utils.constant import DownloadMode

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def find_existing_model(model_id: str, cache_dir: str, revision: str = 'master') -> Optional[str]:
    """Locate an already-downloaded model on disk (smart search).

    Probes a list of known ModelScope cache layouts first, then falls back
    to recursively walking ``cache_dir`` for any directory that contains a
    ``config.json`` plus a weight file.

    Args:
        model_id: ModelScope model identifier, e.g. ``"Qwen/Qwen3-0.6B"``.
        cache_dir: Root directory of the local model cache.
        revision: Revision/branch name used by some cache path layouts.

    Returns:
        Path of the first candidate directory containing ``config.json``,
        or ``None`` when nothing matches (or an unexpected error occurs —
        the search is best-effort so the caller can fall back to download).
    """
    try:
        logger.info(f"正在查找模型 {model_id}...")

        # Candidate directories, checked in order.
        possible_paths = []

        # 1. Observed on-disk layout for this project's Qwen download:
        #    cache_dir/Qwen/Qwen3-0___6B/
        #    NOTE(review): hard-coded to the 0.6B variant — other Qwen model
        #    ids will not match this entry and rely on the fallbacks below.
        if 'Qwen' in model_id:
            possible_paths.append(os.path.join(cache_dir, 'Qwen', 'Qwen3-0___6B'))

        # 2. Generic layouts used by different ModelScope versions.
        possible_paths.extend([
            # ModelScope standard cache path
            os.path.join(cache_dir, model_id.replace('/', '-'), revision),
            # Model id used directly as the path
            os.path.join(cache_dir, model_id),
            # snapshot_download hub-style cache
            os.path.join(cache_dir, 'hub', model_id.replace('/', '--'), revision),
            # Another possible path format
            os.path.join(cache_dir, model_id.replace('/', '--')),
        ])

        # 3. Fallback: recursively scan cache_dir for any directory holding
        #    both a config.json and a recognized weight file.
        #    NOTE(review): this does not verify the directory belongs to
        #    model_id, so an unrelated cached model could be picked up —
        #    acceptable for a single-model cache, confirm for shared caches.
        if os.path.exists(cache_dir):
            for root, dirs, files in os.walk(cache_dir):
                if 'config.json' in files:
                    model_files = ['model.safetensors', 'pytorch_model.bin', 'model.bin']
                    if any(f in files for f in model_files):
                        possible_paths.append(root)

        # Order-preserving de-duplication.
        unique_paths = list(dict.fromkeys(possible_paths))

        logger.info(f"检查 {len(unique_paths)} 个可能的路径:")
        for i, path in enumerate(unique_paths, 1):
            logger.info(f"  {i}. {path}")
            config_path = os.path.join(path, 'config.json')
            if os.path.exists(config_path):
                logger.info(f"✅ 找到已存在的模型: {path}")
                return path
            else:
                # The directory may exist but lack config.json, so the old
                # "路径不存在" message was misleading — report the real reason.
                logger.debug(f"❌ 无效的模型路径(缺少config.json): {path}")

        logger.warning(f"未找到模型 {model_id}，将尝试下载")
        return None
    except Exception as e:
        # Best-effort: swallow search errors and let the caller download.
        logger.warning(f"查找模型时出错: {e}")
        return None


def download_qwen_model(model_id: str, cache_dir: str, revision: str = 'master') -> str:
    """Return a local directory for *model_id*, downloading it if needed.

    Any copy already present under *cache_dir* is reused; otherwise the
    model is fetched from ModelScope via ``snapshot_download``.

    Args:
        model_id: ModelScope model identifier.
        cache_dir: Root directory of the local model cache.
        revision: Revision/branch to download.

    Returns:
        Path to the model directory on disk.

    Raises:
        Exception: whatever ``snapshot_download`` raises on failure,
            after logging it.
    """
    logger.info(f"检查模型 {model_id} 是否已下载...")

    # Reuse a previously downloaded copy when one can be found.
    cached = find_existing_model(model_id, cache_dir, revision)
    if cached is not None:
        logger.info("模型已存在，跳过下载")
        return cached

    # No local copy — fetch from ModelScope.
    logger.info(f"开始下载模型至 {cache_dir}...")
    try:
        model_dir = snapshot_download(
            model_id=model_id,
            cache_dir=cache_dir,
            revision=revision,
        )
    except Exception as e:
        logger.error(f"下载模型失败: {e}")
        raise
    logger.info("模型下载完成！")
    return model_dir


def load_qwen_model(model_dir: str) -> Tuple[AutoModelForCausalLM, AutoTokenizer]:
    """Load the Qwen model and tokenizer from *model_dir*.

    Temporarily forces ``TRANSFORMERS_OFFLINE=1`` while loading to work
    around a torchvision compatibility issue, and restores the previous
    value unconditionally.

    Fixes over the previous version:
    - the environment variable was restored in two separate places
      (success path and except handler); a single ``finally`` now
      guarantees restoration on every exit path;
    - the redundant function-local ``import os`` (shadowing the top-level
      import) is removed.

    Args:
        model_dir: Local directory containing the model files.

    Returns:
        ``(model, tokenizer)`` with the model in eval mode.

    Raises:
        Exception: whatever ``from_pretrained`` raises, after logging it.
    """
    logger.info(f"加载模型 {model_dir}...")

    # Save and override offline mode; restored in the finally block below.
    original_env = os.environ.get('TRANSFORMERS_OFFLINE', '0')
    os.environ['TRANSFORMERS_OFFLINE'] = '1'

    try:
        # Load the tokenizer.
        tokenizer = AutoTokenizer.from_pretrained(
            model_dir,
            trust_remote_code=True,
            padding_side='right'
        )

        # Fall back to EOS as the padding token when none is defined.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        # Prefer bfloat16 on GPU; plain float32 on CPU.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        torch_dtype = torch.bfloat16 if device == "cuda" else torch.float32

        logger.info(f"使用设备: {device}, 数据类型: {torch_dtype}")

        # Load the model with conservative settings.
        model = AutoModelForCausalLM.from_pretrained(
            model_dir,
            trust_remote_code=True,
            device_map='auto' if device == "cuda" else None,
            torch_dtype=torch_dtype,
            low_cpu_mem_usage=True,
            use_safetensors=True,
            attn_implementation="eager"  # eager attention avoids some compatibility issues
        )

        # With no device_map on CPU, move the model explicitly.
        if device == "cpu":
            model = model.to(device)

        model.eval()
        logger.info("模型加载完成！")

        return model, tokenizer

    except Exception as e:
        logger.error(f"加载模型失败: {e}")
        raise
    finally:
        # Always restore the caller's environment, even on failure.
        os.environ['TRANSFORMERS_OFFLINE'] = original_env


def chat_with_qwen(model: AutoModelForCausalLM, tokenizer: AutoTokenizer,
                   max_conversation_length: int = 10) -> None:
    """Run an interactive chat loop on stdin/stdout.

    Maintains a rolling conversation history (at most
    ``max_conversation_length`` user/assistant rounds) and generates
    replies with sampling.

    Fix over the previous version: when generation failed, the unanswered
    user message stayed in the history, producing two consecutive "user"
    turns on the next iteration and confusing the chat template — the
    orphan turn is now dropped in the error handler.

    Args:
        model: Loaded causal LM (already on its target device).
        tokenizer: Matching tokenizer with a chat template.
        max_conversation_length: Maximum number of retained rounds.
    """
    conversation = []
    logger.info("开始聊天（输入'退出'、'quit'或'exit'结束对话）")

    while True:
        try:
            user_input = input("\n你：").strip()

            # Exit keywords.
            if user_input.lower() in ["退出", "quit", "exit", "q"]:
                print("Qwen：再见！")
                break

            # Ignore empty input.
            if not user_input:
                print("Qwen：请输入内容哦～")
                continue

            # Cap the history length to bound memory use
            # (each round contributes a user and an assistant message).
            if len(conversation) >= max_conversation_length * 2:
                conversation = conversation[-max_conversation_length * 2:]
                logger.info("对话历史已截断，保持最新对话内容")

            # Append the new user turn.
            conversation.append({"role": "user", "content": user_input})

            # Render the prompt via the tokenizer's chat template.
            prompt = tokenizer.apply_chat_template(
                conversation,
                add_generation_prompt=True,
                tokenize=False
            )

            # Generate the reply.
            with torch.no_grad():
                inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

                # Guard against exceeding the model's context window,
                # leaving headroom for the generated tokens.
                max_length = getattr(model.config, 'max_position_embeddings', 2048)
                if inputs['input_ids'].shape[1] > max_length - 100:
                    logger.warning("输入过长，截断对话历史")
                    # Keep only the most recent tokens.
                    inputs['input_ids'] = inputs['input_ids'][:, -max_length + 100:]
                    inputs['attention_mask'] = inputs['attention_mask'][:, -max_length + 100:]

                outputs = model.generate(
                    **inputs,
                    max_new_tokens=512,
                    temperature=0.7,
                    do_sample=True,
                    top_p=0.9,
                    top_k=50,
                    repetition_penalty=1.1,
                    pad_token_id=tokenizer.eos_token_id,
                    eos_token_id=tokenizer.eos_token_id
                )

            # Decode only the newly generated tokens (skip the prompt).
            response = tokenizer.decode(
                outputs[0][len(inputs["input_ids"][0]):],
                skip_special_tokens=True
            ).strip()

            print(f"Qwen：{response}")
            conversation.append({"role": "assistant", "content": response})

        except KeyboardInterrupt:
            print("\n\nQwen：检测到中断，再见！")
            break
        except Exception as e:
            logger.error(f"生成回复时出错: {e}")
            # Drop the unanswered user turn so the history stays strictly
            # user/assistant alternating for the chat template.
            if conversation and conversation[-1]["role"] == "user":
                conversation.pop()
            print("Qwen：抱歉，我遇到了一些问题，请重试。")


if __name__ == "__main__":
    # Script configuration.
    MODEL_ID = "Qwen/Qwen3-0.6B"
    CACHE_DIR = "./modelscope_cache"
    REVISION = "master"

    try:
        logger.info("=== Qwen聊天机器人启动 ===")

        # Check and work around dependency issues.
        logger.info("正在检查依赖包...")

        # Force offline mode up-front to sidestep the torchvision import
        # problem. A plain os.environ assignment cannot fail, so the
        # try/except wrapper that used to surround it was dead code and
        # has been removed.
        os.environ['TRANSFORMERS_OFFLINE'] = '1'
        logger.info("已设置离线模式避免torchvision兼容性问题")

        # Download (or reuse), load, then chat.
        model_dir = download_qwen_model(MODEL_ID, CACHE_DIR, REVISION)
        model, tokenizer = load_qwen_model(model_dir)
        chat_with_qwen(model, tokenizer)

    except Exception as e:
        logger.error(f"程序运行出错: {e}")
        print(f"错误: {e}")
        print("\n🔧 解决方案:")
        print("1. 重新安装torch和torchvision:")
        print("   pip uninstall torch torchvision")
        print("   pip install torch torchvision --index-url https://download.pytorch.org/whl/cpu")
        print("\n2. 或者安装兼容版本:")
        print("   pip install torch==2.0.1 torchvision==0.15.2")
        print("\n3. 如果仍有问题，尝试:")
        print("   pip install --upgrade transformers modelscope")
    finally:
        logger.info("程序结束")
