#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Ollama配置自动更新工具
根据实际安装的模型自动更新配置文件
"""

import requests
import yaml
import json
import sys
import os
from typing import List, Dict, Optional
from pathlib import Path

class OllamaConfigUpdater:
    """Synchronize a YAML settings file with the models installed in a local Ollama server.

    Queries Ollama's ``/api/tags`` endpoint, splits the installed models into
    chat and embedding models, and rewrites the ``ai.providers`` entry named
    ``ollama`` in the settings file accordingly.
    """

    # Name substrings that identify embedding models; any model whose name
    # contains none of these is classified as a chat model.
    # NOTE(review): the original also listed chat keywords (llama, qwen, ...)
    # but never consulted them — classification is embedding-or-not.
    EMBEDDING_KEYWORDS = ('embed', 'bge', 'gte', 'm3e', 'text2vec', 'nomic')

    def __init__(self, config_path: str = "config/settings.yaml", base_url: str = "http://localhost:11434"):
        """
        Args:
            config_path: Path of the YAML settings file to read and update.
            base_url: Base URL of the Ollama HTTP API; a trailing slash is stripped.
        """
        self.config_path = config_path
        self.base_url = base_url.rstrip('/')

    def get_installed_models(self) -> List[Dict]:
        """Return the models reported by Ollama's /api/tags, or [] on any failure."""
        try:
            response = requests.get(f"{self.base_url}/api/tags", timeout=10)
            response.raise_for_status()
            return response.json().get('models', [])
        except Exception as e:
            # Best effort: network/HTTP/JSON errors degrade to an empty list.
            print(f"获取模型列表失败: {e}")
            return []

    def load_config(self) -> Dict:
        """Load the YAML settings file; return {} if it is missing, unreadable or empty."""
        try:
            with open(self.config_path, 'r', encoding='utf-8') as f:
                # `or {}` guards against an empty file, where safe_load returns None
                # (the original could return None despite the -> Dict annotation).
                return yaml.safe_load(f) or {}
        except Exception as e:
            print(f"加载配置文件失败: {e}")
            return {}

    def save_config(self, config: Dict):
        """Write *config* back to the settings file, preserving key order and unicode."""
        try:
            with open(self.config_path, 'w', encoding='utf-8') as f:
                yaml.dump(config, f, default_flow_style=False, allow_unicode=True, sort_keys=False)
            print(f"✅ 配置文件已更新: {self.config_path}")
        except Exception as e:
            print(f"保存配置文件失败: {e}")

    def categorize_models(self, models: List[Dict]) -> Dict[str, List[Dict]]:
        """Split *models* into ``{'chat': [...], 'embedding': [...]}`` by name keywords."""
        chat_models: List[Dict] = []
        embedding_models: List[Dict] = []

        for model in models:
            name = model.get('name', '').lower()
            if any(keyword in name for keyword in self.EMBEDDING_KEYWORDS):
                embedding_models.append(model)
            else:
                chat_models.append(model)

        return {
            'chat': chat_models,
            'embedding': embedding_models
        }

    def format_model_config(self, model: Dict, model_type: str) -> Dict:
        """Build one provider-config entry for *model*.

        Args:
            model: Raw model dict from /api/tags; uses 'name' and 'size'.
            model_type: 'chat' or 'embedding'; selects the description text.

        Returns:
            Dict with 'name', 'display_name', 'description' and 'type' keys.
        """
        name = model.get('name', '')
        size = model.get('size', 0)
        # NOTE(review): /api/tags reports 'size' in bytes, so this is MB.
        size_mb = size / (1024 * 1024) if size > 0 else 0

        # Human-readable name, e.g. "qwen2:7b-instruct" -> "Qwen2 7B Instruct".
        display_name = name.replace(':', ' ').replace('-', ' ').replace('_', ' ').title()

        if model_type == 'chat':
            description = f"本地对话模型 - {size_mb:.1f}MB"
        else:
            description = f"本地嵌入模型 - {size_mb:.1f}MB"

        return {
            'name': name,
            'display_name': display_name,
            'description': description,
            'type': model_type
        }

    def _ensure_ollama_provider(self, config: Dict) -> Dict:
        """Return the 'ollama' entry in config['ai']['providers'], creating it if absent."""
        config.setdefault('ai', {})
        config['ai'].setdefault('providers', [])

        for provider in config['ai']['providers']:
            if provider.get('name') == 'ollama':
                return provider

        provider = {
            'name': 'ollama',
            'type': 'local',
            'base_url': self.base_url,
            'auto_discover_models': True,
            'model_discovery_endpoint': '/api/tags'
        }
        config['ai']['providers'].append(provider)
        return provider

    def update_config(self, dry_run: bool = False) -> bool:
        """Discover installed models and write them into the settings file.

        Args:
            dry_run: When True, only print a preview instead of saving.

        Returns:
            True on success (or a successful preview), False otherwise.
        """
        print("🔍 获取已安装的模型...")
        models = self.get_installed_models()

        if not models:
            print("❌ 未找到已安装的模型")
            return False

        print(f"✅ 找到 {len(models)} 个已安装模型")

        categorized = self.categorize_models(models)
        chat_models = categorized['chat']
        embedding_models = categorized['embedding']

        print(f"📝 对话模型: {len(chat_models)} 个")
        print(f"🔗 嵌入模型: {len(embedding_models)} 个")

        config = self.load_config()
        if not config:
            return False

        ollama_provider = self._ensure_ollama_provider(config)

        # Default to the smallest chat model (fastest to load on modest hardware).
        if chat_models:
            chat_models.sort(key=lambda x: x.get('size', 0))
            default_model = chat_models[0]['name']
            ollama_provider['default_model'] = default_model
            print(f"🎯 设置默认模型: {default_model}")

        # Chat models first, then embedding models.
        ollama_provider['models'] = (
            [self.format_model_config(m, 'chat') for m in chat_models]
            + [self.format_model_config(m, 'embedding') for m in embedding_models]
        )

        if dry_run:
            print("\n📋 预览配置更改:")
            print("=" * 50)
            print(f"默认模型: {ollama_provider.get('default_model', '未设置')}")
            print(f"对话模型: {len(chat_models)} 个")
            print(f"嵌入模型: {len(embedding_models)} 个")
            print("\n前5个对话模型:")
            for i, model in enumerate(chat_models[:5], 1):
                print(f"  {i}. {model['name']}")
        else:
            self.save_config(config)
        return True

    def backup_config(self):
        """Copy the settings file to '<config_path>.backup' if it exists (best effort)."""
        if os.path.exists(self.config_path):
            backup_path = f"{self.config_path}.backup"
            try:
                import shutil
                shutil.copy2(self.config_path, backup_path)
                print(f"📦 配置文件已备份: {backup_path}")
            except Exception as e:
                print(f"备份配置文件失败: {e}")

def main():
    """CLI entry point: verify the Ollama service is reachable, then update the config."""
    import argparse

    parser = argparse.ArgumentParser(description='Ollama配置自动更新工具')
    parser.add_argument('--dry-run', action='store_true', help='预览模式，不实际修改配置文件')
    parser.add_argument('--config', default='config/settings.yaml', help='配置文件路径')
    parser.add_argument('--backup', action='store_true', help='备份原配置文件')

    args = parser.parse_args()

    updater = OllamaConfigUpdater(args.config)

    # Probe the service first so we can fail fast with a clear message.
    # The original wrapped the status check in a bare `except:`, which also
    # swallowed the SystemExit raised by sys.exit(1) and printed the error
    # twice; only the HTTP request itself belongs inside the try.
    try:
        response = requests.get(f"{updater.base_url}/api/tags", timeout=5)
    except requests.RequestException:
        print("❌ Ollama服务未运行或无法连接")
        sys.exit(1)
    if response.status_code != 200:
        print("❌ Ollama服务未运行或无法连接")
        sys.exit(1)

    print("✅ Ollama服务运行正常")

    # Optional backup of the current config before touching it.
    if args.backup:
        updater.backup_config()

    success = updater.update_config(dry_run=args.dry_run)

    if success:
        if args.dry_run:
            print("\n💡 使用 --backup 参数可以备份原配置文件")
            print("💡 去掉 --dry-run 参数可以实际更新配置文件")
        else:
            print("\n🎉 配置更新完成！")
            print("💡 建议重启应用以使新配置生效")
    else:
        print("\n❌ 配置更新失败")
        sys.exit(1)

# Script entry point: `python <this file> [--dry-run] [--config PATH] [--backup]`.
if __name__ == "__main__":
    main() 