#!/usr/bin/env python3
"""
直接安装Qwen模型脚本
使用transformers库直接加载本地Qwen模型
"""

import os
import sys
import subprocess
import platform
from pathlib import Path

def check_system():
    """Print basic host information: OS, CPU architecture, Python version."""
    print("=== 系统信息检查 ===")
    os_description = f"{platform.system()} {platform.release()}"
    print(f"操作系统: {os_description}")
    print(f"架构: {platform.machine()}")
    print(f"Python版本: {sys.version}")
    print()

def install_dependencies():
    """Install the required third-party packages with pip, one at a time.

    Each package is installed in its own subprocess; a failure is reported
    but does not abort the loop, so one bad package cannot block the rest.
    """
    print("=== 安装依赖包 ===")

    required_specs = (
        "torch>=2.0.0",
        "transformers>=4.30.0",
        "accelerate>=0.20.0",
        "sentence-transformers>=2.2.0",
        "langchain>=0.3.0",
        "langchain-community>=0.3.0",
        "fastapi>=0.100.0",
        "uvicorn>=0.23.0",
        "python-dotenv>=1.0.0",
        "arxiv>=2.1.0",
        "requests>=2.31.0",
    )

    # Invoke pip through the current interpreter so the packages land in
    # the same environment this script runs in.
    pip_command = [sys.executable, "-m", "pip", "install"]
    for spec in required_specs:
        print(f"安装 {spec}...")
        try:
            subprocess.check_call(pip_command + [spec])
        except subprocess.CalledProcessError as exc:
            print(f"✗ {spec} 安装失败: {exc}")
        else:
            print(f"✓ {spec} 安装成功")

    print()

def download_qwen_model():
    """Interactively pick a Qwen model and verify it is downloadable.

    Prompts the user to choose one of the listed Qwen2.5 instruct models,
    then downloads only the tokenizer as a cheap availability/connectivity
    check — the full weights are fetched lazily on first use elsewhere.

    Returns:
        The selected Hugging Face model id (str) on success, or None when
        the tokenizer download fails.
    """
    print("=== 下载Qwen模型 ===")

    models = [
        "Qwen/Qwen2.5-0.5B-Instruct",
        "Qwen/Qwen2.5-1.5B-Instruct",
        "Qwen/Qwen2.5-3B-Instruct",
        "Qwen/Qwen2.5-7B-Instruct",
    ]
    # 1-based default; the 3B model is a reasonable size/quality tradeoff.
    default_index = 3

    print("可用的Qwen模型:")
    for i, model in enumerate(models, 1):
        print(f"{i}. {model}")

    # Derive the prompt range and default from the list itself so the text
    # stays correct if the model list ever changes.
    try:
        choice = input(f"\n请选择模型 (1-{len(models)}, 默认选择{default_index}): ").strip()
        index = int(choice) if choice else default_index
        if not 1 <= index <= len(models):
            index = default_index
    except (ValueError, EOFError):
        # Non-numeric input, or stdin closed (non-interactive run):
        # fall back to the default model instead of crashing.
        index = default_index
    selected_model = models[index - 1]

    print(f"\n选择模型: {selected_model}")
    print("开始下载模型，这可能需要一些时间...")

    try:
        print("测试模型加载...")
        from transformers import AutoTokenizer

        # Download only the tokenizer: it is small and proves the model id
        # exists and the Hugging Face hub is reachable.
        print("下载tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained(
            selected_model,
            trust_remote_code=True
        )
        print("✓ Tokenizer下载成功")

        return selected_model

    except Exception as e:
        print(f"✗ 模型下载失败: {e}")
        return None

def configure_environment(model_name):
    """Merge local-model settings into backend/.env, creating it if needed.

    Existing key=value pairs are preserved; comment lines are replaced by a
    single header comment on rewrite. The backend/ directory is created if
    missing so the function works on a fresh checkout.

    Args:
        model_name: Hugging Face model id recorded as LOCAL_MODEL_NAME.
    """
    print("=== 配置环境 ===")

    env_file = Path("backend/.env")
    # Without this, open(..., 'w') below raises FileNotFoundError when the
    # backend/ directory does not exist yet.
    env_file.parent.mkdir(parents=True, exist_ok=True)

    # Read existing key=value pairs, skipping blanks and comment lines.
    config = {}
    if env_file.exists():
        with open(env_file, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith('#') and '=' in line:
                    key, value = line.split('=', 1)
                    config[key.strip()] = value.strip()

    # Force direct-load mode for the local Qwen model.
    config.update({
        'LLM_MODE': 'local_direct',
        'LOCAL_MODEL_NAME': model_name,
        'DEBUG': 'true',
        'PORT': '8000'
    })

    # Rewrite the whole file from the merged dict.
    with open(env_file, 'w', encoding='utf-8') as f:
        f.write("# 本地Qwen模型配置\n")
        for key, value in config.items():
            f.write(f"{key}={value}\n")

    print(f"✓ 环境配置已保存到 {env_file}")

def create_model_loader():
    """Write backend/services/model_loader.py, a standalone Qwen loader.

    The generated script reads LOCAL_MODEL_NAME from the environment,
    loads the model and tokenizer with transformers, and can be run
    directly as a smoke test. The target directory is created if missing
    so this works on a fresh checkout.
    """
    print("=== 创建模型加载器 ===")

    # NOTE: this literal is runtime data (the file content being written),
    # so it is kept verbatim.
    loader_content = '''import os
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

def load_qwen_model():
    """加载Qwen模型"""
    model_name = os.getenv("LOCAL_MODEL_NAME", "Qwen/Qwen2.5-3B-Instruct")
    
    print(f"正在加载模型: {model_name}")
    
    try:
        # 加载tokenizer
        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True
        )
        
        # 加载模型
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True
        )
        
        print("✓ 模型加载完成")
        return model, tokenizer
        
    except Exception as e:
        print(f"✗ 模型加载失败: {e}")
        return None, None

if __name__ == "__main__":
    model, tokenizer = load_qwen_model()
    if model and tokenizer:
        print("模型测试成功！")
    else:
        print("模型测试失败")
'''

    loader_file = Path("backend/services/model_loader.py")
    # Without this, open(..., 'w') raises FileNotFoundError when the
    # backend/services/ directory does not exist yet.
    loader_file.parent.mkdir(parents=True, exist_ok=True)
    with open(loader_file, 'w', encoding='utf-8') as f:
        f.write(loader_content)

    print("✓ 模型加载器创建完成")

def test_installation():
    """Verify the core ML dependencies import cleanly.

    Returns:
        True when transformers and torch are importable (also reporting
        the torch version and CUDA availability), False otherwise.
    """
    print("=== 测试安装 ===")

    try:
        # Import check only — we don't need the module beyond this.
        import transformers  # noqa: F401
        print("✓ transformers库可用")

        import torch
        print(f"✓ PyTorch版本: {torch.__version__}")
        cuda_ok = torch.cuda.is_available()
        print(f"✓ CUDA可用: {cuda_ok}")
        if cuda_ok:
            print(f"✓ GPU设备: {torch.cuda.get_device_name()}")
    except Exception as e:
        print(f"✗ 安装测试失败: {e}")
        return False
    else:
        return True

def main():
    """Run the full installation workflow end to end.

    Steps: report system info, install dependencies, verify imports,
    let the user pick a Qwen model, then write the env config and the
    model-loader script. Aborts early when a step fails.
    """
    print("🤖 本地Qwen模型直接安装向导")
    print("=" * 50)

    check_system()
    install_dependencies()

    # Bail out early when the core libraries failed to install.
    if not test_installation():
        print("依赖安装失败，请检查网络连接")
        return

    # Bail out when no model could be selected/verified.
    model_name = download_qwen_model()
    if not model_name:
        print("模型选择失败")
        return

    configure_environment(model_name)
    create_model_loader()

    print("\n🎉 本地Qwen模型安装配置完成！")
    print("\n下一步:")
    print("1. 运行 'python start_server.py' 启动服务")
    print("2. 模型将在首次使用时自动下载")
    print("3. 访问 http://localhost:8000 测试功能")
    print(f"\n当前模型: {model_name}")
    print("\n注意: 首次运行时会下载完整的模型文件，请确保有足够的磁盘空间")

# Script entry point: run the wizard, reporting cancellation (Ctrl+C) and
# any unexpected failure as friendly messages instead of tracebacks.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\n用户取消安装")
    except Exception as e:
        print(f"\n安装过程出现错误: {e}")
        print("请检查网络连接和系统权限")