#!/usr/bin/env python3
"""
本地LLM安装脚本
自动安装和配置Qwen模型
"""

import os
import sys
import subprocess
import platform
import requests
import time
from pathlib import Path

def check_system():
    """Print a summary of the host: OS, CPU architecture, Python, and GPU/CUDA status."""
    print("=== 系统信息检查 ===")
    print(f"操作系统: {platform.system()} {platform.release()}")
    print(f"架构: {platform.machine()}")
    print(f"Python版本: {sys.version}")

    # PyTorch is an optional dependency here: import lazily and report
    # its absence instead of failing the whole system check.
    try:
        import torch
    except ImportError:
        print("GPU: ✗ PyTorch未安装")
    else:
        if torch.cuda.is_available():
            print(f"GPU: ✓ CUDA {torch.version.cuda}")
            print(f"GPU设备: {torch.cuda.get_device_name()}")
        else:
            print("GPU: ✗ 未检测到CUDA GPU")

    print()

def install_ollama():
    """Install Ollama for the current platform, or guide the user to do so.

    Returns:
        bool: True if Ollama is installed (or was installed successfully),
        False if manual action is still required or installation failed.
    """
    print("=== 安装Ollama ===")

    system = platform.system().lower()

    if system == "windows":
        print("Windows系统检测到")
        print("请手动安装Ollama:")
        print("1. 访问: https://ollama.ai/download")
        print("2. 下载Windows版本")
        print("3. 运行安装程序")
        print("4. 安装完成后，重新运行此脚本")

        # Probe for an existing install so a re-run after manual
        # installation can proceed without user intervention.
        try:
            result = subprocess.run(['ollama', '--version'],
                                  capture_output=True, text=True, timeout=5)
            if result.returncode == 0:
                print("✓ Ollama已安装:", result.stdout.strip())
                return True
            else:
                print("✗ Ollama未安装或未添加到PATH")
                return False
        except (subprocess.TimeoutExpired, FileNotFoundError):
            print("✗ Ollama未安装")
            return False

    elif system == "linux":
        print("Linux系统，使用curl安装...")
        try:
            # Bug fix: the original passed a LIST together with shell=True,
            # which makes the shell execute only "curl" and drop the URL
            # argument — and it never piped the script to sh anyway.
            # Use the documented one-liner as a single shell string.
            subprocess.run(
                'curl -fsSL https://ollama.ai/install.sh | sh',
                check=True, shell=True
            )
            print("✓ Ollama安装完成")
            return True
        except subprocess.CalledProcessError:
            print("✗ Ollama安装失败")
            return False

    elif system == "darwin":  # macOS: no automated path; point at Homebrew.
        print("macOS系统，请使用Homebrew安装:")
        print("brew install ollama")
        return False

    # Unknown platform: nothing we can do automatically.
    return False

def _ensure_ollama_service():
    """Return True once the local Ollama HTTP API responds, starting it if needed."""
    try:
        response = requests.get("http://localhost:11434/api/version", timeout=5)
        if response.status_code == 200:
            print("✓ Ollama服务正在运行")
            return True
        print("✗ Ollama服务未响应")
        return False
    except requests.RequestException:
        print("启动Ollama服务...")
        # Launch "ollama serve" in the background; on Windows give it its
        # own console so it survives independently of this script.
        if platform.system().lower() == "windows":
            subprocess.Popen(['ollama', 'serve'],
                           creationflags=subprocess.CREATE_NEW_CONSOLE)
        else:
            subprocess.Popen(['ollama', 'serve'])

        print("等待Ollama服务启动...")
        # Poll the version endpoint for up to ~30 seconds.
        for i in range(30):
            try:
                response = requests.get("http://localhost:11434/api/version", timeout=2)
                if response.status_code == 200:
                    print("✓ Ollama服务已启动")
                    return True
            except requests.RequestException:
                pass
            time.sleep(1)
            print(f"等待中... ({i+1}/30)")
        print("✗ Ollama服务启动超时")
        return False

def _choose_model():
    """Prompt for a Qwen model size; fall back to qwen:7b on empty/bad input."""
    print("\n可用的Qwen模型:")
    models = [
        ("qwen:1.8b", "1.8B参数 (~1.1GB) - 快速，适合测试"),
        ("qwen:7b", "7B参数 (~4.1GB) - 平衡性能"),
        ("qwen:14b", "14B参数 (~8.2GB) - 高质量"),
        ("qwen:32b", "32B参数 (~18GB) - 最高质量")
    ]

    print("推荐选择:")
    for i, (model, desc) in enumerate(models, 1):
        print(f"{i}. {model} - {desc}")

    try:
        choice = input("\n请选择模型 (1-4, 默认选择2): ").strip()
        choice = int(choice or "2")
        if 1 <= choice <= len(models):
            return models[choice - 1][0]
    except ValueError:
        pass
    return "qwen:7b"

def _pull_model(selected_model):
    """Run 'ollama pull', streaming progress; return the model name or None."""
    print(f"\n开始下载模型: {selected_model}")
    print("这可能需要几分钟时间，请耐心等待...")

    try:
        process = subprocess.Popen(
            ['ollama', 'pull', selected_model],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,       # text=True already implies universal newlines
            bufsize=1
        )

        # Relay the pull progress line by line as it arrives.
        for line in process.stdout:
            print(line.strip())

        process.wait()

        if process.returncode == 0:
            print(f"✓ 模型 {selected_model} 下载完成")
            return selected_model
        print(f"✗ 模型下载失败")
        return None
    except OSError as e:
        # Bug fix: Popen never raises CalledProcessError; a missing
        # 'ollama' binary raises FileNotFoundError (an OSError), which the
        # original except clause would not have caught.
        print(f"✗ 下载过程出错: {e}")
        return None

def download_qwen_model():
    """Ensure the Ollama service is up, let the user pick a Qwen model, pull it.

    Returns:
        str: the pulled model tag on success.
        False: if the Ollama service could not be reached/started.
        None: if the model download failed.
    """
    print("=== 下载Qwen模型 ===")

    if not _ensure_ollama_service():
        return False

    return _pull_model(_choose_model())

def configure_environment(model_name):
    """Persist local-LLM settings to backend/.env, preserving existing keys.

    Args:
        model_name: Ollama model tag (e.g. "qwen:7b") to record in the config.
    """
    print("=== 配置环境 ===")

    env_file = Path("backend/.env")

    # Load any existing KEY=VALUE pairs so unrelated settings survive the
    # rewrite; comment lines and malformed lines are skipped.
    config = {}
    if env_file.exists():
        with open(env_file, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith('#') and '=' in line:
                    key, value = line.split('=', 1)
                    config[key.strip()] = value.strip()

    # Overwrite/insert the local-LLM settings.
    config.update({
        'QWEN_API_URL': 'http://localhost:11434',
        'QWEN_MODEL_NAME': model_name,
        'LLM_MODE': 'local',
        'DEBUG': 'true'
    })

    # Bug fix: the original crashed with FileNotFoundError when the
    # backend/ directory did not exist yet (fresh checkout).
    env_file.parent.mkdir(parents=True, exist_ok=True)

    with open(env_file, 'w', encoding='utf-8') as f:
        f.write("# 本地LLM配置\n")
        for key, value in config.items():
            f.write(f"{key}={value}\n")

    print(f"✓ 环境配置已保存到 {env_file}")

def test_model(model_name):
    """Send one prompt to the local Ollama generate API and report the outcome.

    Args:
        model_name: Ollama model tag to exercise.

    Returns:
        bool: True if the model answered with HTTP 200, False otherwise.
    """
    print("=== 测试模型 ===")

    payload = {
        "model": model_name,
        "prompt": "你好，请简单介绍一下你自己。",
        "stream": False
    }

    try:
        response = requests.post(
            "http://localhost:11434/api/generate",
            json=payload,
            timeout=30
        )

        if response.status_code != 200:
            print(f"✗ 模型测试失败: HTTP {response.status_code}")
            return False

        result = response.json()
        print("✓ 模型测试成功")
        # Show only the first 100 characters of the reply.
        print("模型响应:", result.get('response', '')[:100] + "...")
        return True

    except requests.RequestException as e:
        print(f"✗ 模型测试失败: {e}")
        return False

def main():
    """Run the install wizard: system check, Ollama install, model pull, config, smoke test."""
    print("🤖 本地LLM (Qwen) 安装向导")
    print("=" * 50)

    check_system()

    # Each stage gates the next; bail out early with a hint on failure.
    if not install_ollama():
        print("请先安装Ollama后再运行此脚本")
        return

    model_name = download_qwen_model()
    if not model_name:
        print("模型下载失败，请检查网络连接")
        return

    configure_environment(model_name)

    if not test_model(model_name):
        print("❌ 安装过程中出现问题，请检查配置")
        return

    print("\n🎉 本地LLM安装配置完成！")
    print("\n下一步:")
    print("1. 运行 'python start_server.py' 启动服务")
    print("2. 访问 http://localhost:8000/docs 查看API")
    print("3. 打开前端界面测试功能")
    print(f"\n当前模型: {model_name}")
    print("Ollama管理命令:")
    print("  - ollama list          # 查看已安装模型")
    print("  - ollama rm <model>    # 删除模型")
    print("  - ollama serve         # 启动服务")

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is an expected way to abort the wizard; exit quietly.
        print("\n用户取消安装")
    except Exception as e:
        # Top-level boundary: report any unexpected failure instead of
        # showing the user a raw traceback.
        print(f"\n安装过程出现错误: {e}")
        print("请检查网络连接和系统权限")