#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Qwen3-Embedding-8B-GGUF 环境设置和测试脚本
检查并安装必要的依赖，然后测试模型

使用方法:
python3 setup_qwen3_env.py
"""

import os
import sys
import subprocess
import importlib.util

def check_and_install_package(package_name, import_name=None, pip_name=None):
    """Ensure a Python package is importable, installing it via pip if missing.

    Args:
        package_name: Display name used in progress messages.
        import_name: Module name used for the import check (defaults to
            ``package_name``).
        pip_name: Distribution name passed to ``pip install`` (defaults to
            ``package_name``).

    Returns:
        True if the package is already present or was installed successfully,
        False if installation failed, timed out, or raised an error.
    """
    if import_name is None:
        import_name = package_name
    if pip_name is None:
        pip_name = package_name

    print(f"检查 {package_name}...")

    # Fast path: module already importable, nothing to install.
    if importlib.util.find_spec(import_name) is not None:
        print(f"  ✅ {package_name} 已安装")
        return True

    # Not found -- attempt a pip install capped at 5 minutes.
    print(f"  📦 正在安装 {pip_name}...")
    try:
        proc = subprocess.run(
            [sys.executable, "-m", "pip", "install", pip_name],
            capture_output=True,
            text=True,
            timeout=300,
        )
    except subprocess.TimeoutExpired:
        print(f"  ⏰ {package_name} 安装超时")
        return False
    except Exception as exc:
        print(f"  ❌ {package_name} 安装出错: {str(exc)}")
        return False

    if proc.returncode == 0:
        print(f"  ✅ {package_name} 安装成功")
        return True

    print(f"  ❌ {package_name} 安装失败: {proc.stderr}")
    return False

def setup_environment():
    """Check/install required and optional dependencies.

    Required packages gate the return value; optional packages are attempted
    but their failures are ignored.

    Returns:
        True when every required package is available, False otherwise.
    """

    print("🔧 设置Qwen3-Embedding环境")
    print("=" * 50)

    # Each entry: (display name, import name or None, pip name).
    required = [
        ("numpy", None, "numpy"),
        ("llama-cpp-python", "llama_cpp", "llama-cpp-python"),
    ]
    optional = [
        ("transformers", None, "transformers"),
        ("torch", None, "torch"),
        ("sentence-transformers", "sentence_transformers", "sentence-transformers"),
    ]

    print("📋 检查必需依赖:")
    # Evaluate every required package (no short-circuit) so all of them
    # get an install attempt even after an earlier failure.
    results = [check_and_install_package(*entry) for entry in required]
    all_required_ok = all(results)

    print(f"\n📋 检查可选依赖:")
    for entry in optional:
        check_and_install_package(*entry)

    print("\n" + "=" * 50)

    if not all_required_ok:
        print("❌ 环境设置失败，请手动安装缺失的依赖")
        print("\n手动安装命令:")
        print("pip3 install numpy llama-cpp-python")
        return False

    print("✅ 环境设置完成！")
    return True

# Default location where ModelScope caches the downloaded GGUF model.
_DEFAULT_MODEL_PATH = (
    "/Users/baimu/.cache/modelscope/hub/models/Qwen/"
    "Qwen3-Embedding-8B-GGUF/Qwen3-Embedding-8B-Q8_0.gguf"
)

def simple_model_test(model_path=None):
    """Smoke-test the local GGUF embedding model.

    Verifies in order: the model file exists, llama-cpp-python imports,
    the model loads with a small context, and a short text can be embedded.

    Args:
        model_path: Path to the ``.gguf`` model file. Defaults to the
            ModelScope cache location used by this project, so existing
            no-argument callers keep their behavior.

    Returns:
        True if every step succeeds, False on the first failure.
    """

    print("\n🧪 简单模型测试")
    print("=" * 50)

    if model_path is None:
        model_path = _DEFAULT_MODEL_PATH

    # Bail out early if the model was never downloaded.
    if not os.path.exists(model_path):
        print(f"❌ 模型文件不存在: {model_path}")
        print("\n请确认模型已通过以下命令下载:")
        print("modelscope download --model Qwen/Qwen3-Embedding-8B-GGUF")
        return False

    file_size = os.path.getsize(model_path) / (1024**3)  # bytes -> GB
    print(f"📁 找到模型文件: {os.path.basename(model_path)}")
    print(f"📊 文件大小: {file_size:.2f} GB")

    # Import lazily so the script can report a clear message when the
    # dependency is missing instead of crashing at module import time.
    try:
        from llama_cpp import Llama
        print("✅ llama-cpp-python 导入成功")
    except ImportError as e:
        print(f"❌ 无法导入 llama-cpp-python: {e}")
        print("请尝试重新安装: pip3 install llama-cpp-python")
        return False

    # Load the model and run one embedding as an end-to-end check.
    print("\n🔄 尝试加载模型...")
    try:
        model = Llama(
            model_path=model_path,
            n_ctx=512,  # small context to reduce memory footprint
            n_threads=2,  # keep thread count low for a smoke test
            embedding=True,
            verbose=False
        )
        print("✅ 模型加载成功!")

        print("\n🔍 测试向量化功能...")
        test_text = "测试文本"
        embedding = model.embed(test_text)

        if embedding:
            print(f"✅ 向量化成功!")
            print(f"📏 向量维度: {len(embedding)}")
            print(f"📊 向量前5维: {embedding[:5]}")
        else:
            print("❌ 向量化失败")
            return False

        return True

    except Exception as e:
        print(f"❌ 模型加载失败: {str(e)}")
        print("\n可能的解决方案:")
        print("1. 确保有足够的内存（模型约8GB）")
        print("2. 检查模型文件是否完整")
        print("3. 尝试重新下载模型")
        return False

def main():
    """Entry point: set up dependencies, then run the model smoke test."""

    print("🎯 Qwen3-Embedding-8B-GGUF 环境设置")
    print("=" * 60)
    print()

    # Abort if required dependencies could not be installed.
    env_ready = setup_environment()
    if not env_ready:
        return

    test_passed = simple_model_test()
    if test_passed:
        print("\n🎉 测试成功!")
        print("\n📝 下一步:")
        print("1. 运行完整测试: python3 test_qwen3_local.py")
        print("2. 查看向量化器代码: qwen3_local_vectorizer.py")
        print("3. 集成到您的项目中")
    else:
        print("\n❌ 测试失败")
        print("请检查上述错误信息并解决问题")

    print("\n" + "=" * 60)

if __name__ == "__main__":
    # Top-level guard: swallow Ctrl-C quietly, report anything else
    # with a full traceback for debugging.
    try:
        main()
    except KeyboardInterrupt:
        print("\n\n⏹️ 用户中断")
    except Exception as exc:
        print(f"\n❌ 运行出错: {str(exc)}")
        import traceback
        traceback.print_exc()
