#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Ollama模型查询工具
用于查询本地ollama服务已安装的模型
"""

import requests
import json
import sys
from typing import List, Dict, Optional

class OllamaModelDiscovery:
    """Discover models installed on a local Ollama server via its HTTP API.

    Network and parsing failures are reported to stdout and converted into
    empty/None results instead of being raised, so callers can degrade
    gracefully when the service is unavailable.
    """

    def __init__(self, base_url: str = "http://localhost:11434"):
        # Strip any trailing slash so the endpoint f-strings below never
        # produce a double slash (e.g. "http://host//api/tags").
        self.base_url = base_url.rstrip('/')

    def get_installed_models(self) -> List[Dict]:
        """Return the installed models reported by GET /api/tags.

        Returns:
            The list under the response's "models" key, or [] on any
            connection, HTTP-status, or JSON-decoding error.
        """
        try:
            response = requests.get(f"{self.base_url}/api/tags", timeout=10)
            response.raise_for_status()
            data = response.json()
            return data.get('models', [])
        except requests.exceptions.RequestException as e:
            print(f"连接ollama服务失败: {e}")
            return []
        except json.JSONDecodeError as e:
            print(f"解析响应失败: {e}")
            return []

    def get_model_info(self, model_name: str) -> Optional[Dict]:
        """Return detailed metadata for *model_name* via POST /api/show.

        Returns:
            The decoded JSON response, or None on any connection,
            HTTP-status, or JSON-decoding error.
        """
        try:
            response = requests.post(
                f"{self.base_url}/api/show",
                json={"name": model_name},
                timeout=10
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"获取模型信息失败: {e}")
            return None
        except json.JSONDecodeError as e:
            print(f"解析响应失败: {e}")
            return None

    def format_model_list(self, models: List[Dict]) -> str:
        """Render a /api/tags model list as a human-readable numbered table."""
        if not models:
            return "未找到已安装的模型"

        output = "已安装的Ollama模型:\n"
        output += "=" * 50 + "\n"

        for i, model in enumerate(models, 1):
            name = model.get('name', '未知')
            size = model.get('size', 0)
            # The API reports size in bytes; display as MB with one decimal.
            size_mb = size / (1024 * 1024) if size > 0 else 0

            output += f"{i}. 模型名称: {name}\n"
            output += f"   大小: {size_mb:.1f} MB\n"
            output += f"   修改时间: {model.get('modified_at', '未知')}\n"
            output += "-" * 30 + "\n"

        return output

    def check_service_status(self) -> bool:
        """Return True if GET /api/tags answers HTTP 200 within 5 seconds."""
        try:
            response = requests.get(f"{self.base_url}/api/tags", timeout=5)
            return response.status_code == 200
        # Fixed: was a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit; catch only request failures.
        except requests.exceptions.RequestException:
            return False

def main():
    """Entry point: verify the Ollama service, list models, print config hints."""
    discovery = OllamaModelDiscovery()

    print("检查Ollama服务状态...")
    if not discovery.check_service_status():
        # Service unreachable: explain the likely causes and bail out.
        for line in (
            "❌ Ollama服务未运行或无法连接",
            "请确保:",
            "1. Ollama已安装并运行",
            "2. 服务地址为 http://localhost:11434",
            "3. 防火墙未阻止连接",
        ):
            print(line)
        sys.exit(1)

    print("✅ Ollama服务运行正常")
    print()

    models = discovery.get_installed_models()

    if not models:
        # Nothing installed yet: suggest a couple of pull commands and exit OK.
        for line in (
            "未找到已安装的模型",
            "您可以使用以下命令安装模型:",
            "ollama pull llama3.2:3b",
            "ollama pull qwen2.5:3b",
        ):
            print(line)
        sys.exit(0)

    print(discovery.format_model_list(models))

    # Emit a ready-to-paste provider snippet for each discovered model.
    print("配置文件建议:")
    print("在 config/settings.yaml 的 ai.providers 中添加:")
    print()

    for entry in models:
        model_name = entry.get('name', '')
        if not model_name:
            continue
        print(f"        - name: \"{model_name}\"")
        print(f"          display_name: \"{model_name.replace(':', ' ').title()}\"")
        print(f"          description: \"本地模型 - {model_name}\"")

if __name__ == "__main__":
    main()